/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
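/* As an illustration (a hypothetical example, not taken from any
   particular machine description), given the linked pair

       I2: (set (reg 100) (plus (reg 99) (const_int 4)))
       I3: (set (mem (reg 100)) (reg 101))

   the combiner substitutes the value set by I2 into I3 and, if the
   target recognizes register+offset addresses, replaces the pair with

       I3: (set (mem (plus (reg 99) (const_int 4))) (reg 101))

   and deletes I2.  */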

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "stor-layout.h"
#include "cfgrtl.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "explow.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
         to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value             the last value assigned
     last_set_label             records the value of label_tick when the
                                register was assigned
     last_set_table_tick        records the value of label_tick when a
                                value using the register is assigned
     last_set_invalid           set to nonzero when it is not valid
                                to use the value of this register in some
                                register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};


static vec<reg_stat_type> reg_stat;

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)  (uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)  (uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)                              \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;
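/* A sketch of how the log links are consumed (insn UIDs and register
   numbers are made up for illustration): if insn 12 is the most recent
   setter of (reg 64) before insn 17 uses it in the same basic block,
   then LOG_LINKS (insn 17) contains a link with insn == <insn 12> and
   regno == 64, which FOR_EACH_LOG_LINK visits:

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->regno == 64)
         break;
*/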

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
                                          sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;


/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
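/* A sketch of the undo discipline (descriptive only): subst and its
   helpers modify RTL in place through SUBST / SUBST_INT / SUBST_MODE /
   SUBST_LINK, each of which pushes the old contents onto undobuf.undos.
   If the rewritten pattern fails to match, undo_all walks that list and
   restores every *where from old_contents; if it matches, undo_commit
   moves the records to undobuf.frees for reuse.  */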

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
                                         scalar_int_mode,
                                         unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
                                                scalar_int_mode,
                                                unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                          rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                              int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
                            rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
                              unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
                              scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
                          unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
                                     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
                                   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
                            HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
                                 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
                                             rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int
reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
                              rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);


/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;


/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
                                bool op0_preserve_value)
{
  int code_int = (int)*code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code)code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn uses DEST if
         it is mentioned in the destination or the source.  Otherwise, we
         just need to check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
          && GET_CODE (SET_DEST (x)) != PC
          && !REG_P (SET_DEST (x))
          && ! (GET_CODE (SET_DEST (x)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (x)))
                && !read_modify_subreg_p (SET_DEST (x))))
        break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (dest == XEXP (x, i)
              || (REG_P (dest) && REG_P (XEXP (x, i))
                  && REGNO (dest) == REGNO (XEXP (x, i))))
            this_result = loc;
          else
            this_result = find_single_use_1 (dest, &XEXP (x, i));

          if (result == NULL)
            result = this_result;
          else if (this_result)
            /* Duplicate usage.  */
            return NULL;
        }
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            {
              if (XVECEXP (x, i, j) == dest
                  || (REG_P (dest)
                      && REG_P (XVECEXP (x, i, j))
                      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
                this_result = loc;
              else
                this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

              if (result == NULL)
                result = this_result;
              else if (this_result)
                return NULL;
            }
        }
    }

  return result;
}


/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
          || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
        return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
        *ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
        FOR_EACH_LOG_LINK (link, next)
          if (link->insn == insn && link->regno == REGNO (dest))
            break;

        if (link)
          {
            result = find_single_use_1 (dest, &PATTERN (next));
            if (ploc)
              *ploc = next;
            return result;
          }
      }

  return 0;
}
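/* Illustration (made-up insns): if insn 10 computes (reg 70) and the only
   later reference in the block is insn 11

       (set (reg 71) (plus (reg 70) (const_int 1)))

   which carries a REG_DEAD note for (reg 70) and a LOG_LINK back to
   insn 10, then find_single_use ((reg 70), insn 10, &ploc) returns the
   address of the (reg 70) operand inside the PLUS and sets ploc to
   insn 11.  */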

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
         that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
                  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
         CONST_INT is not valid, because after the replacement, the
         original mode would be gone.  Unfortunately, we can't tell
         when do_SUBST is called to replace the operand thereof, so we
         perform this test on oldval instead, checking whether an
         invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
                    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
                    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)  do_SUBST (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
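/* Typical (illustrative) use: replacing one operand of PAT while keeping
   the change undoable,

     SUBST (XEXP (pat, 0), new_rtx);

   records the old XEXP (pat, 0) in undobuf before overwriting it.  */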

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
                       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
        {
          i0_cost = INSN_COST (i0);
          old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
        }
      else
        {
          old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i1_cost + i2_cost + i3_cost : 0);
          i0_cost = 0;
        }
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;


  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
        ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
                                  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
        {
          old_cost += old_other_cost;
          new_cost += new_other_cost;
        }
      else
        old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
               reject ? "rejecting" : "allowing");
      if (i0)
        fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
        fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
        fprintf (dump_file, "replacement costs %d + %d = %d\n",
                 new_i2_cost, new_i3_cost, new_cost);
      else
        fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
        INSN_COST (i0) = 0;
    }

  return true;
}
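/* A worked example of the cost test above (numbers invented): if I2 and
   I3 cost 4 each, old_cost is 8; if the merged pattern for I3 costs 12
   and there is no NEWI2PAT, new_cost is 12 > 8 and the combination is
   rejected.  A zero cost means "unknown" and disables the comparison.  */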


/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
        {
          next = NEXT_INSN (insn);
          if (INSN_P (insn) && noop_move_p (insn))
            {
              if (dump_file)
                fprintf (dump_file, "deleting noop move %d\n",
                         INSN_UID (insn));

              delete_insn_and_edges (insn);
            }
        }
    }
}


/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
          && regno == HARD_FRAME_POINTER_REGNUM
          && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
          && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          /* Log links are created only once.  */
          gcc_assert (!LOG_LINKS (insn));

          FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

              if (!can_combine_def_p (def))
                continue;

              use_insn = next_use[regno];
              next_use[regno] = NULL;

              if (BLOCK_FOR_INSN (use_insn) != bb)
                continue;

              /* flow.c claimed:

                 We don't build a LOG_LINK for hard registers contained
                 in ASM_OPERANDs.  If these registers get replaced,
                 we might wind up changing the semantics of the insn,
                 even if reload can make what appear to be valid
                 assignments later.  */
              if (regno < FIRST_PSEUDO_REGISTER
                  && asm_noperands (PATTERN (use_insn)) >= 0)
                continue;

              /* Don't add duplicate links between instructions.  */
              struct insn_link *links;
              FOR_EACH_LOG_LINK (links, use_insn)
                if (insn == links->insn && regno == links->regno)
                  break;

              if (!links)
                LOG_LINKS (use_insn)
                  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
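/* Sketch of create_log_links's backward scan (made-up UIDs): scanning a
   block from the end, when insn 20 uses (reg 80) we record
   next_use[80] = insn 20; when the scan later reaches insn 15 defining
   (reg 80), a link { insn 15, regno 80 } is attached to
   LOG_LINKS (insn 20) and next_use[80] is cleared, so earlier defs of
   (reg 80) do not link to insn 20.  */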

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use them while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
          {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

            note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
                         insn);
            record_dead_and_set_regs (insn);

            if (AUTO_INC_DEC)
              for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
                if (REG_NOTE_KIND (links) == REG_INC)
                  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
                                                    insn);

            /* Record the current insn_cost of this instruction.  */
            if (NONJUMP_INSN_P (insn))
              INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
            if (dump_file)
              {
                fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
                dump_insn_slim (dump_file, insn);
              }
          }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
         be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
        continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
           insn != NEXT_INSN (BB_END (this_basic_block));
           insn = next ? next : NEXT_INSN (insn))
        {
          next = 0;
          if (!NONDEBUG_INSN_P (insn))
            continue;

          while (last_combined_insn
                 && (!NONDEBUG_INSN_P (last_combined_insn)
                     || last_combined_insn->deleted ()))
            last_combined_insn = PREV_INSN (last_combined_insn);
          if (last_combined_insn == NULL_RTX
              || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
              || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
            last_combined_insn = insn;

          /* See if we know about function return values before this
             insn based upon SUBREG flags.  */
          check_promoted_subreg (insn, PATTERN (insn));

          /* See if we can find hardregs and subreg of pseudos in
             narrower modes.  This could help turning TRUNCATEs
             into SUBREGs.  */
          note_uses (&PATTERN (insn), record_truncated_values, NULL);

          /* Try this insn with each insn it links back to.  */

          FOR_EACH_LOG_LINK (links, insn)
            if ((next = try_combine (insn, links->insn, NULL,
                                     NULL, &new_direct_jump_p,
                                     last_combined_insn)) != 0)
              {
                statistics_counter_event (cfun, "two-insn combine", 1);
                goto retry;
              }

          /* Try each sequence of three linked insns ending with this one.  */

          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              {
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (nextlinks, link)
                  if ((next = try_combine (insn, link, nextlinks->insn,
                                           NULL, &new_direct_jump_p,
                                           last_combined_insn)) != 0)
                    {
                      statistics_counter_event (cfun, "three-insn combine", 1);
                      goto retry;
                    }
              }

          /* Try to combine a jump insn that uses CC0
             with a preceding insn that sets CC0, and maybe with its
             logical predecessor as well.
             This is how we make decrement-and-branch insns.
             We need this special code because data flow connections
             via CC0 do not get entered in LOG_LINKS.  */

          if (HAVE_cc0
              && JUMP_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev)))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }
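          /* For instance (a schematic illustration, not target-specific
             RTL), the decrement-and-branch case above may merge

                 (set (cc0) (compare (reg 90) (const_int 1)))
                 (set (pc) (if_then_else (ne (cc0) (const_int 0)) ...))

             together with the decrement feeding the compare into a
             single loop-closing branch insn, when the target provides
             one.  */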
          /* Do the same for an insn that explicitly references CC0.  */
          if (HAVE_cc0 && NONJUMP_INSN_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev))
              && GET_CODE (PATTERN (insn)) == SET
              && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }

          /* Finally, see if any of the insns that this insn links to
             explicitly references CC0.  If so, try this insn, that insn,
             and its predecessor if it sets CC0.  */
          if (HAVE_cc0)
            {
              FOR_EACH_LOG_LINK (links, insn)
                if (NONJUMP_INSN_P (links->insn)
                    && GET_CODE (PATTERN (links->insn)) == SET
                    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
                    && (prev = prev_nonnote_insn (links->insn)) != 0
                    && NONJUMP_INSN_P (prev)
                    && sets_cc0_p (PATTERN (prev))
                    && (next = try_combine (insn, links->insn,
                                            prev, NULL, &new_direct_jump_p,
                                            last_combined_insn)) != 0)
                  goto retry;
            }

          /* Try combining an insn with two different insns whose results it
             uses.  */
          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              for (nextlinks = links->next; nextlinks;
                   nextlinks = nextlinks->next)
                if ((next = try_combine (insn, links->insn,
                                         nextlinks->insn, NULL,
                                         &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  {
                    statistics_counter_event (cfun, "three-insn combine", 1);
                    goto retry;
                  }

          /* Try four-instruction combinations.  */
          if (max_combine >= 4)
            FOR_EACH_LOG_LINK (links, insn)
              {
                struct insn_link *next1;
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (next1, link)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I1 -> I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0, I1 -> I2, I2 -> I3.  */
                    for (nextlinks = next1->next; nextlinks;
                         nextlinks = nextlinks->next)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                  }

                for (next1 = links->next; next1; next1 = next1->next)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I2; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0 -> I1; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                  }
              }

          /* Try this insn with each REG_EQUAL note it links back to.  */
          FOR_EACH_LOG_LINK (links, insn)
            {
              rtx set, note;
              rtx_insn *temp = links->insn;
              if ((set = single_set (temp)) != 0
                  && (note = find_reg_equal_equiv_note (temp)) != 0
                  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
                  /* Avoid using a register that may already have been
                     marked dead by an earlier instruction.  */
                  && ! unmentioned_reg_p (note, SET_SRC (set))
                  && (GET_MODE (note) == VOIDmode
                      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
                      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
                         && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
                             || (GET_MODE (XEXP (SET_DEST (set), 0))
                                 == GET_MODE (note))))))
                {
                  /* Temporarily replace the set's source with the
                     contents of the REG_EQUAL note.  The insn will
                     be deleted or recognized by try_combine.  */
                  rtx orig_src = SET_SRC (set);
                  rtx orig_dest = SET_DEST (set);
                  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
                    SET_DEST (set) = XEXP (SET_DEST (set), 0);
                  SET_SRC (set) = note;
                  i2mod = temp;
                  i2mod_old_rhs = copy_rtx (orig_src);
                  i2mod_new_rhs = copy_rtx (note);
                  next = try_combine (insn, i2mod, NULL, NULL,
                                      &new_direct_jump_p,
                                      last_combined_insn);
                  i2mod = NULL;
                  if (next)
                    {
                      statistics_counter_event (cfun, "insn-with-note combine", 1);
                      goto retry;
                    }
                  SET_SRC (set) = orig_src;
                  SET_DEST (set) = orig_dest;
                }
            }

          if (!NOTE_P (insn))
            record_dead_and_set_regs (insn);

        retry:
          ;
        }
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
        next = undo->next;
        free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
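/* Note (descriptive): the offsetof bound above means only the fields
   declared before sign_bit_copies in reg_stat_type are wiped; the
   function-wide sign_bit_copies, nonzero_bits and truncation fields are
   deliberately preserved across passes.  */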

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
        continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
         take into account the exporting of a function via taking its
         address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
                                     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
         (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
        continue;
      /* (b) The mode of the register is the same as the mode of
             the argument as it is passed;  */
      if (mode3 != mode4)
        continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
        ;
      /* (c.1) All callers are from the current compilation unit.  If that's
         the case we don't have to rely on an ABI, we only have to know
         what we're generating right now, and we know that we will do the
         mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
        continue;
      /* (c.2) The combination of the two promotions is useful.  This is
         true when the signs match, or if the first promotion is unsigned.
         In the latter case, (sign_extend (zero_extend x)) is the same as
         (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
        uns3 = true;
      else if (uns3)
        continue;

      /* Record that the value was promoted from mode1 to mode3,
         so that any sign extension at the head of the current
         function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
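/* Numeric illustration (assuming QImode is 8 bits wide and
   SHORT_IMMEDIATES_SIGN_EXTEND holds): a QImode constant 0x80 has its
   sign bit set, so it is rewritten as 0x80 | ~0xff, i.e. the value the
   register would actually hold after a hardware sign extension.  */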

/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one
   exists) and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
                           rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
        reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
                                           BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
        bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
        {
          unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
          if (num == 0 || numeq > num)
            num = numeq;
        }
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
        rsp->sign_bit_copies = num;
    }
}
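/* Example of the bookkeeping above (values invented): after seeing
   (set (reg 66) (and (reg 65) (const_int 15))) the nonzero_bits entry
   for (reg 66) gains at most the bits 0xf; a second set from a
   zero-extending byte load would OR in 0xff.  Combine can later treat a
   subsequent (and (reg 66) (const_int 255)) as redundant, since those
   are the only bits that can be nonzero.  */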
*/ 1767 1768 if (insn 1769 && reg_referenced_p (x, PATTERN (insn)) 1770 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)), 1771 REGNO (x))) 1772 { 1773 struct insn_link *link; 1774 1775 FOR_EACH_LOG_LINK (link, insn) 1776 if (dead_or_set_p (link->insn, x)) 1777 break; 1778 if (!link) 1779 { 1780 rsp->nonzero_bits = GET_MODE_MASK (mode); 1781 rsp->sign_bit_copies = 1; 1782 return; 1783 } 1784 } 1785 1786 /* If this is a complex assignment, see if we can convert it into a 1787 simple assignment. */ 1788 set = expand_field_assignment (set); 1789 1790 /* If this is a simple assignment, or we have a paradoxical SUBREG, 1791 set what we know about X. */ 1792 1793 if (SET_DEST (set) == x 1794 || (paradoxical_subreg_p (SET_DEST (set)) 1795 && SUBREG_REG (SET_DEST (set)) == x)) 1796 update_rsp_from_reg_equal (rsp, insn, set, x); 1797 else 1798 { 1799 rsp->nonzero_bits = GET_MODE_MASK (mode); 1800 rsp->sign_bit_copies = 1; 1801 } 1802 } 1803 } 1804 1805 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are 1806 optionally insns that were previously combined into I3 or that will be 1807 combined into the merger of INSN and I3. The order is PRED, PRED2, 1808 INSN, SUCC, SUCC2, I3. 1809 1810 Return 0 if the combination is not allowed for any reason. 1811 1812 If the combination is allowed, *PDEST will be set to the single 1813 destination of INSN and *PSRC to the single source, and this function 1814 will return 1. */ 1815 1816 static int 1817 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED, 1818 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2, 1819 rtx *pdest, rtx *psrc) 1820 { 1821 int i; 1822 const_rtx set = 0; 1823 rtx src, dest; 1824 rtx_insn *p; 1825 rtx link; 1826 bool all_adjacent = true; 1827 int (*is_volatile_p) (const_rtx); 1828 1829 if (succ) 1830 { 1831 if (succ2) 1832 { 1833 if (next_active_insn (succ2) != i3) 1834 all_adjacent = false; 1835 if (next_active_insn (succ) != succ2) 1836 all_adjacent = false; 1837 } 1838 else if (next_active_insn (succ) != i3) 1839 all_adjacent = false; 1840 if (next_active_insn (insn) != succ) 1841 all_adjacent = false; 1842 } 1843 else if (next_active_insn (insn) != i3) 1844 all_adjacent = false; 1845 1846 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0. 1847 or a PARALLEL consisting of such a SET and CLOBBERs. 1848 1849 If INSN has CLOBBER parallel parts, ignore them for our processing. 1850 By definition, these happen during the execution of the insn. When it 1851 is merged with another insn, all bets are off. If they are, in fact, 1852 needed and aren't also supplied in I3, they may be added by 1853 recog_for_combine. Otherwise, it won't match. 1854 1855 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED 1856 note. 1857 1858 Get the source and destination of INSN. If more than one, can't 1859 combine. */ 1860 1861 if (GET_CODE (PATTERN (insn)) == SET) 1862 set = PATTERN (insn); 1863 else if (GET_CODE (PATTERN (insn)) == PARALLEL 1864 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) 1865 { 1866 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) 1867 { 1868 rtx elt = XVECEXP (PATTERN (insn), 0, i); 1869 1870 switch (GET_CODE (elt)) 1871 { 1872 /* This is important to combine floating point insns 1873 for the SH4 port. */ 1874 case USE: 1875 /* Combining an isolated USE doesn't make sense. 1876 We depend here on combinable_i3pat to reject them. 
*/
	    /* The code below this loop only verifies that the inputs of
	       the SET in INSN do not change.  We call reg_set_between_p
	       to verify that the REG in the USE does not change between
	       I3 and INSN.
	       If the USE in INSN was for a pseudo register, the matching
	       insn pattern will likely match any register; combining this
	       with any other USE would only be safe if we knew that the
	       used registers have identical values, or if there was
	       something to tell them apart, e.g. different modes.  For
	       now, we forgo such complicated tests and simply disallow
	       combining of USES of pseudo registers with any other USE.  */
	    if (REG_P (XEXP (elt, 0))
		&& GET_CODE (PATTERN (i3)) == PARALLEL)
	      {
		rtx i3pat = PATTERN (i3);
		int i = XVECLEN (i3pat, 0) - 1;
		unsigned int regno = REGNO (XEXP (elt, 0));

		do
		  {
		    rtx i3elt = XVECEXP (i3pat, 0, i);

		    if (GET_CODE (i3elt) == USE
			&& REG_P (XEXP (i3elt, 0))
			&& (REGNO (XEXP (i3elt, 0)) == regno
			    ? reg_set_between_p (XEXP (elt, 0),
						 PREV_INSN (insn), i3)
			    : regno >= FIRST_PSEUDO_REGISTER))
		      return 0;
		  }
		while (--i >= 0);
	      }
	    break;

	  /* We can ignore CLOBBERs.  */
	  case CLOBBER:
	    break;

	  case SET:
	    /* Ignore SETs whose result isn't used but not those that
	       have side-effects.  */
	    if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		&& insn_nothrow_p (insn)
		&& !side_effects_p (elt))
	      break;

	    /* If we have already found a SET, this is a second one and
	       so we cannot combine with this insn.  */
	    if (set)
	      return 0;

	    set = elt;
	    break;

	  default:
	    /* Anything else means we can't combine.  */
	    return 0;
	  }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;

  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is used as an asm
     input: removing it could break the register-asm usage that the GCC
     manual documents.  Be aware that this may cover more cases than we
     expect, but this should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.
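	 (For instance, a register that a REG_INC note records as being
	 modified by a POST_INC address; the checks below look for such
	 notes on I3, SUCC and SUCC2.)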
*/
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto; this confuses the CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && modified_between_p (src, insn, i3))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;

  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 a user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.
*/ 2047 || (REGNO (src) < FIRST_PSEUDO_REGISTER 2048 && !targetm.hard_regno_mode_ok (REGNO (src), 2049 GET_MODE (src))))) 2050 return 0; 2051 } 2052 else if (GET_CODE (dest) != CC0) 2053 return 0; 2054 2055 2056 if (GET_CODE (PATTERN (i3)) == PARALLEL) 2057 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--) 2058 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER) 2059 { 2060 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0); 2061 2062 /* If the clobber represents an earlyclobber operand, we must not 2063 substitute an expression containing the clobbered register. 2064 As we do not analyze the constraint strings here, we have to 2065 make the conservative assumption. However, if the register is 2066 a fixed hard reg, the clobber cannot represent any operand; 2067 we leave it up to the machine description to either accept or 2068 reject use-and-clobber patterns. */ 2069 if (!REG_P (reg) 2070 || REGNO (reg) >= FIRST_PSEUDO_REGISTER 2071 || !fixed_regs[REGNO (reg)]) 2072 if (reg_overlap_mentioned_p (reg, src)) 2073 return 0; 2074 } 2075 2076 /* If INSN contains anything volatile, or is an `asm' (whether volatile 2077 or not), reject, unless nothing volatile comes between it and I3 */ 2078 2079 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src)) 2080 { 2081 /* Make sure neither succ nor succ2 contains a volatile reference. */ 2082 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2))) 2083 return 0; 2084 if (succ != 0 && volatile_refs_p (PATTERN (succ))) 2085 return 0; 2086 /* We'll check insns between INSN and I3 below. */ 2087 } 2088 2089 /* If INSN is an asm, and DEST is a hard register, reject, since it has 2090 to be an explicit register variable, and was chosen for a reason. */ 2091 2092 if (GET_CODE (src) == ASM_OPERANDS 2093 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER) 2094 return 0; 2095 2096 /* If INSN contains volatile references (specifically volatile MEMs), 2097 we cannot combine across any other volatile references. 2098 Even if INSN doesn't contain volatile references, any intervening 2099 volatile insn might affect machine state. */ 2100 2101 is_volatile_p = volatile_refs_p (PATTERN (insn)) 2102 ? volatile_refs_p 2103 : volatile_insn_p; 2104 2105 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p)) 2106 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p))) 2107 return 0; 2108 2109 /* If INSN contains an autoincrement or autodecrement, make sure that 2110 register is not used between there and I3, and not already used in 2111 I3 either. Neither must it be used in PRED or SUCC, if they exist. 2112 Also insist that I3 not be a jump; if it were one 2113 and the incremented register were spilled, we would lose. */ 2114 2115 if (AUTO_INC_DEC) 2116 for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) 2117 if (REG_NOTE_KIND (link) == REG_INC 2118 && (JUMP_P (i3) 2119 || reg_used_between_p (XEXP (link, 0), insn, i3) 2120 || (pred != NULL_RTX 2121 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred))) 2122 || (pred2 != NULL_RTX 2123 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2))) 2124 || (succ != NULL_RTX 2125 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ))) 2126 || (succ2 != NULL_RTX 2127 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2))) 2128 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3)))) 2129 return 0; 2130 2131 /* Don't combine an insn that follows a CC0-setting insn. 2132 An insn that uses CC0 must not be separated from the one that sets it. 
2133 We do, however, allow I2 to follow a CC0-setting insn if that insn 2134 is passed as I1; in that case it will be deleted also. 2135 We also allow combining in this case if all the insns are adjacent 2136 because that would leave the two CC0 insns adjacent as well. 2137 It would be more logical to test whether CC0 occurs inside I1 or I2, 2138 but that would be much slower, and this ought to be equivalent. */ 2139 2140 if (HAVE_cc0) 2141 { 2142 p = prev_nonnote_insn (insn); 2143 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p)) 2144 && ! all_adjacent) 2145 return 0; 2146 } 2147 2148 /* If we get here, we have passed all the tests and the combination is 2149 to be allowed. */ 2150 2151 *pdest = dest; 2152 *psrc = src; 2153 2154 return 1; 2155 } 2156 2157 /* LOC is the location within I3 that contains its pattern or the component 2158 of a PARALLEL of the pattern. We validate that it is valid for combining. 2159 2160 One problem is if I3 modifies its output, as opposed to replacing it 2161 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as 2162 doing so would produce an insn that is not equivalent to the original insns. 2163 2164 Consider: 2165 2166 (set (reg:DI 101) (reg:DI 100)) 2167 (set (subreg:SI (reg:DI 101) 0) <foo>) 2168 2169 This is NOT equivalent to: 2170 2171 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>) 2172 (set (reg:DI 101) (reg:DI 100))]) 2173 2174 Not only does this modify 100 (in which case it might still be valid 2175 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100. 2176 2177 We can also run into a problem if I2 sets a register that I1 2178 uses and I1 gets directly substituted into I3 (not via I2). In that 2179 case, we would be getting the wrong value of I2DEST into I3, so we 2180 must reject the combination. This case occurs when I2 and I1 both 2181 feed into I3, rather than when I1 feeds into I2, which feeds into I3. 2182 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source 2183 of a SET must prevent combination from occurring. The same situation 2184 can occur for I0, in which case I0_NOT_IN_SRC is set. 2185 2186 Before doing the above check, we first try to expand a field assignment 2187 into a set of logical operations. 2188 2189 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which 2190 we place a register that is both set and used within I3. If more than one 2191 such register is detected, we fail. 2192 2193 Return 1 if the combination is valid, zero otherwise. */ 2194 2195 static int 2196 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest, 2197 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed) 2198 { 2199 rtx x = *loc; 2200 2201 if (GET_CODE (x) == SET) 2202 { 2203 rtx set = x ; 2204 rtx dest = SET_DEST (set); 2205 rtx src = SET_SRC (set); 2206 rtx inner_dest = dest; 2207 rtx subdest; 2208 2209 while (GET_CODE (inner_dest) == STRICT_LOW_PART 2210 || GET_CODE (inner_dest) == SUBREG 2211 || GET_CODE (inner_dest) == ZERO_EXTRACT) 2212 inner_dest = XEXP (inner_dest, 0); 2213 2214 /* Check for the case where I3 modifies its output, as discussed 2215 above. We don't want to prevent pseudos from being combined 2216 into the address of a MEM, so only prevent the combination if 2217 i1 or i2 set the same MEM. 
*/ 2218 if ((inner_dest != dest && 2219 (!MEM_P (inner_dest) 2220 || rtx_equal_p (i2dest, inner_dest) 2221 || (i1dest && rtx_equal_p (i1dest, inner_dest)) 2222 || (i0dest && rtx_equal_p (i0dest, inner_dest))) 2223 && (reg_overlap_mentioned_p (i2dest, inner_dest) 2224 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest)) 2225 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest)))) 2226 2227 /* This is the same test done in can_combine_p except we can't test 2228 all_adjacent; we don't have to, since this instruction will stay 2229 in place, thus we are not considering increasing the lifetime of 2230 INNER_DEST. 2231 2232 Also, if this insn sets a function argument, combining it with 2233 something that might need a spill could clobber a previous 2234 function argument; the all_adjacent test in can_combine_p also 2235 checks this; here, we do a more specific test for this case. */ 2236 2237 || (REG_P (inner_dest) 2238 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER 2239 && !targetm.hard_regno_mode_ok (REGNO (inner_dest), 2240 GET_MODE (inner_dest))) 2241 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)) 2242 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src))) 2243 return 0; 2244 2245 /* If DEST is used in I3, it is being killed in this insn, so 2246 record that for later. We have to consider paradoxical 2247 subregs here, since they kill the whole register, but we 2248 ignore partial subregs, STRICT_LOW_PART, etc. 2249 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the 2250 STACK_POINTER_REGNUM, since these are always considered to be 2251 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */ 2252 subdest = dest; 2253 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest)) 2254 subdest = SUBREG_REG (subdest); 2255 if (pi3dest_killed 2256 && REG_P (subdest) 2257 && reg_referenced_p (subdest, PATTERN (i3)) 2258 && REGNO (subdest) != FRAME_POINTER_REGNUM 2259 && (HARD_FRAME_POINTER_IS_FRAME_POINTER 2260 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM) 2261 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM 2262 || (REGNO (subdest) != ARG_POINTER_REGNUM 2263 || ! fixed_regs [REGNO (subdest)])) 2264 && REGNO (subdest) != STACK_POINTER_REGNUM) 2265 { 2266 if (*pi3dest_killed) 2267 return 0; 2268 2269 *pi3dest_killed = subdest; 2270 } 2271 } 2272 2273 else if (GET_CODE (x) == PARALLEL) 2274 { 2275 int i; 2276 2277 for (i = 0; i < XVECLEN (x, 0); i++) 2278 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest, 2279 i1_not_in_src, i0_not_in_src, pi3dest_killed)) 2280 return 0; 2281 } 2282 2283 return 1; 2284 } 2285 2286 /* Return 1 if X is an arithmetic expression that contains a multiplication 2287 and division. We don't count multiplications by powers of two here. */ 2288 2289 static int 2290 contains_muldiv (rtx x) 2291 { 2292 switch (GET_CODE (x)) 2293 { 2294 case MOD: case DIV: case UMOD: case UDIV: 2295 return 1; 2296 2297 case MULT: 2298 return ! (CONST_INT_P (XEXP (x, 1)) 2299 && pow2p_hwi (UINTVAL (XEXP (x, 1)))); 2300 default: 2301 if (BINARY_P (x)) 2302 return contains_muldiv (XEXP (x, 0)) 2303 || contains_muldiv (XEXP (x, 1)); 2304 2305 if (UNARY_P (x)) 2306 return contains_muldiv (XEXP (x, 0)); 2307 2308 return 0; 2309 } 2310 } 2311 2312 /* Determine whether INSN can be used in a combination. Return nonzero if 2313 not. This is used in try_combine to detect early some cases where we 2314 can't perform combinations. 
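   (For example, on a target where the class containing hard register 0
   is likely spilled, a plain copy such as (set (reg:SI 50) (reg:SI 0))
   is refused here; this is a sketch, the precise test is in the body
   below.)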
*/

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS
						 (REGNO (dest))))))
    return 1;

  return 0;
}

struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}

/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.
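     A worked example (with hypothetical register numbers): suppose the
     return value occupies hard regs 8..11, so REGNO is 8, NREGS is 4 and
     MASK starts as 0b1111.  If an insn between INSN and USE fully sets
     regs 10 and 11, likely_spilled_retval_1 computes new_mask = 0b11
     shifted left by (10 - 8) positions, i.e. 0b1100, and clears those
     bits, leaving MASK = 0b0011: only regs 8 and 9 are still live here.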
*/ 2418 info.regno = regno; 2419 info.nregs = nregs; 2420 info.mask = mask; 2421 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p)) 2422 if (INSN_P (p)) 2423 note_stores (PATTERN (p), likely_spilled_retval_1, &info); 2424 mask = info.mask; 2425 2426 /* Check if any of the (probably) live return value registers is 2427 likely spilled. */ 2428 nregs --; 2429 do 2430 { 2431 if ((mask & 1 << nregs) 2432 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs))) 2433 return 1; 2434 } while (nregs--); 2435 return 0; 2436 } 2437 2438 /* Adjust INSN after we made a change to its destination. 2439 2440 Changing the destination can invalidate notes that say something about 2441 the results of the insn and a LOG_LINK pointing to the insn. */ 2442 2443 static void 2444 adjust_for_new_dest (rtx_insn *insn) 2445 { 2446 /* For notes, be conservative and simply remove them. */ 2447 remove_reg_equal_equiv_notes (insn); 2448 2449 /* The new insn will have a destination that was previously the destination 2450 of an insn just above it. Call distribute_links to make a LOG_LINK from 2451 the next use of that destination. */ 2452 2453 rtx set = single_set (insn); 2454 gcc_assert (set); 2455 2456 rtx reg = SET_DEST (set); 2457 2458 while (GET_CODE (reg) == ZERO_EXTRACT 2459 || GET_CODE (reg) == STRICT_LOW_PART 2460 || GET_CODE (reg) == SUBREG) 2461 reg = XEXP (reg, 0); 2462 gcc_assert (REG_P (reg)); 2463 2464 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL)); 2465 2466 df_insn_rescan (insn); 2467 } 2468 2469 /* Return TRUE if combine can reuse reg X in mode MODE. 2470 ADDED_SETS is nonzero if the original set is still required. */ 2471 static bool 2472 can_change_dest_mode (rtx x, int added_sets, machine_mode mode) 2473 { 2474 unsigned int regno; 2475 2476 if (!REG_P (x)) 2477 return false; 2478 2479 /* Don't change between modes with different underlying register sizes, 2480 since this could lead to invalid subregs. */ 2481 if (maybe_ne (REGMODE_NATURAL_SIZE (mode), 2482 REGMODE_NATURAL_SIZE (GET_MODE (x)))) 2483 return false; 2484 2485 regno = REGNO (x); 2486 /* Allow hard registers if the new mode is legal, and occupies no more 2487 registers than the old mode. */ 2488 if (regno < FIRST_PSEUDO_REGISTER) 2489 return (targetm.hard_regno_mode_ok (regno, mode) 2490 && REG_NREGS (x) >= hard_regno_nregs (regno, mode)); 2491 2492 /* Or a pseudo that is only used once. */ 2493 return (regno < reg_n_sets_max 2494 && REG_N_SETS (regno) == 1 2495 && !added_sets 2496 && !REG_USERVAR_P (x)); 2497 } 2498 2499 2500 /* Check whether X, the destination of a set, refers to part of 2501 the register specified by REG. */ 2502 2503 static bool 2504 reg_subword_p (rtx x, rtx reg) 2505 { 2506 /* Check that reg is an integer mode register. */ 2507 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT) 2508 return false; 2509 2510 if (GET_CODE (x) == STRICT_LOW_PART 2511 || GET_CODE (x) == ZERO_EXTRACT) 2512 x = XEXP (x, 0); 2513 2514 return GET_CODE (x) == SUBREG 2515 && SUBREG_REG (x) == reg 2516 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT; 2517 } 2518 2519 /* Delete the unconditional jump INSN and adjust the CFG correspondingly. 2520 Note that the INSN should be deleted *after* removing dead edges, so 2521 that the kept edge is the fallthrough edge for a (set (pc) (pc)) 2522 but not for a (set (pc) (label_ref FOO)). 
*/ 2523 2524 static void 2525 update_cfg_for_uncondjump (rtx_insn *insn) 2526 { 2527 basic_block bb = BLOCK_FOR_INSN (insn); 2528 gcc_assert (BB_END (bb) == insn); 2529 2530 purge_dead_edges (bb); 2531 2532 delete_insn (insn); 2533 if (EDGE_COUNT (bb->succs) == 1) 2534 { 2535 rtx_insn *insn; 2536 2537 single_succ_edge (bb)->flags |= EDGE_FALLTHRU; 2538 2539 /* Remove barriers from the footer if there are any. */ 2540 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn)) 2541 if (BARRIER_P (insn)) 2542 { 2543 if (PREV_INSN (insn)) 2544 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); 2545 else 2546 BB_FOOTER (bb) = NEXT_INSN (insn); 2547 if (NEXT_INSN (insn)) 2548 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); 2549 } 2550 else if (LABEL_P (insn)) 2551 break; 2552 } 2553 } 2554 2555 /* Return whether PAT is a PARALLEL of exactly N register SETs followed 2556 by an arbitrary number of CLOBBERs. */ 2557 static bool 2558 is_parallel_of_n_reg_sets (rtx pat, int n) 2559 { 2560 if (GET_CODE (pat) != PARALLEL) 2561 return false; 2562 2563 int len = XVECLEN (pat, 0); 2564 if (len < n) 2565 return false; 2566 2567 int i; 2568 for (i = 0; i < n; i++) 2569 if (GET_CODE (XVECEXP (pat, 0, i)) != SET 2570 || !REG_P (SET_DEST (XVECEXP (pat, 0, i)))) 2571 return false; 2572 for ( ; i < len; i++) 2573 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER 2574 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx) 2575 return false; 2576 2577 return true; 2578 } 2579 2580 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some 2581 CLOBBERs), can be split into individual SETs in that order, without 2582 changing semantics. */ 2583 static bool 2584 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n) 2585 { 2586 if (!insn_nothrow_p (insn)) 2587 return false; 2588 2589 rtx pat = PATTERN (insn); 2590 2591 int i, j; 2592 for (i = 0; i < n; i++) 2593 { 2594 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i)))) 2595 return false; 2596 2597 rtx reg = SET_DEST (XVECEXP (pat, 0, i)); 2598 2599 for (j = i + 1; j < n; j++) 2600 if (reg_referenced_p (reg, XVECEXP (pat, 0, j))) 2601 return false; 2602 } 2603 2604 return true; 2605 } 2606 2607 /* Try to combine the insns I0, I1 and I2 into I3. 2608 Here I0, I1 and I2 appear earlier than I3. 2609 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into 2610 I3. 2611 2612 If we are combining more than two insns and the resulting insn is not 2613 recognized, try splitting it into two insns. If that happens, I2 and I3 2614 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE. 2615 Otherwise, I0, I1 and I2 are pseudo-deleted. 2616 2617 Return 0 if the combination does not work. Then nothing is changed. 2618 If we did the combination, return the insn at which combine should 2619 resume scanning. 2620 2621 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a 2622 new direct jump instruction. 2623 2624 LAST_COMBINED_INSN is either I3, or some insn after I3 that has 2625 been I3 passed to an earlier try_combine within the same basic 2626 block. */ 2627 2628 static rtx_insn * 2629 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, 2630 int *new_direct_jump_p, rtx_insn *last_combined_insn) 2631 { 2632 /* New patterns for I3 and I2, respectively. */ 2633 rtx newpat, newi2pat = 0; 2634 rtvec newpat_vec_with_clobbers = 0; 2635 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0; 2636 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not 2637 dead. 
  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int split_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;

  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.
2726 2727 In the event that there's a match but the source/dest do not actually 2728 refer to the same memory, the worst that happens is we try some 2729 combinations that we wouldn't have otherwise. */ 2730 if ((set0 = single_set (i0)) 2731 /* Ensure the source of SET0 is a MEM, possibly buried inside 2732 an extension. */ 2733 && (GET_CODE (SET_SRC (set0)) == MEM 2734 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND 2735 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND) 2736 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM)) 2737 && (set3 = single_set (i3)) 2738 /* Ensure the destination of SET3 is a MEM. */ 2739 && GET_CODE (SET_DEST (set3)) == MEM 2740 /* Would it be better to extract the base address for the MEM 2741 in SET3 and look for that? I don't have cases where it matters 2742 but I could envision such cases. */ 2743 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0))) 2744 ngood += 2; 2745 2746 if (ngood < 2 && nshift < 2) 2747 return 0; 2748 } 2749 2750 /* Exit early if one of the insns involved can't be used for 2751 combinations. */ 2752 if (CALL_P (i2) 2753 || (i1 && CALL_P (i1)) 2754 || (i0 && CALL_P (i0)) 2755 || cant_combine_insn_p (i3) 2756 || cant_combine_insn_p (i2) 2757 || (i1 && cant_combine_insn_p (i1)) 2758 || (i0 && cant_combine_insn_p (i0)) 2759 || likely_spilled_retval_p (i3)) 2760 return 0; 2761 2762 combine_attempts++; 2763 undobuf.other_insn = 0; 2764 2765 /* Reset the hard register usage information. */ 2766 CLEAR_HARD_REG_SET (newpat_used_regs); 2767 2768 if (dump_file && (dump_flags & TDF_DETAILS)) 2769 { 2770 if (i0) 2771 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n", 2772 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3)); 2773 else if (i1) 2774 fprintf (dump_file, "\nTrying %d, %d -> %d:\n", 2775 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3)); 2776 else 2777 fprintf (dump_file, "\nTrying %d -> %d:\n", 2778 INSN_UID (i2), INSN_UID (i3)); 2779 2780 if (i0) 2781 dump_insn_slim (dump_file, i0); 2782 if (i1) 2783 dump_insn_slim (dump_file, i1); 2784 dump_insn_slim (dump_file, i2); 2785 dump_insn_slim (dump_file, i3); 2786 } 2787 2788 /* If multiple insns feed into one of I2 or I3, they can be in any 2789 order. To simplify the code below, reorder them in sequence. */ 2790 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2)) 2791 std::swap (i0, i2); 2792 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1)) 2793 std::swap (i0, i1); 2794 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2)) 2795 std::swap (i1, i2); 2796 2797 added_links_insn = 0; 2798 added_notes_insn = 0; 2799 2800 /* First check for one important special case that the code below will 2801 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL 2802 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case, 2803 we may be able to replace that destination with the destination of I3. 2804 This occurs in the common code where we compute both a quotient and 2805 remainder into a structure, in which case we want to do the computation 2806 directly into the structure to avoid register-register copies. 2807 2808 Note that this case handles both multiple sets in I2 and also cases 2809 where I2 has a number of CLOBBERs inside the PARALLEL. 2810 2811 We make very conservative checks below and only try to handle the 2812 most common cases of this. For example, we only handle the case 2813 where I2 and I3 are adjacent to avoid making difficult register 2814 usage tests. 
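     A sketch of the shape being handled (register numbers invented):

	I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		       (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
	I3: (set (mem:SI (reg:SI 200)) (reg:SI 101))

     Provided (reg:SI 101) is a pseudo that dies in I3, the (reg:SI 101)
     destination inside I2 is replaced by (mem:SI (reg:SI 200)) and the
     separate store in I3 goes away.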
*/ 2815 2816 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET 2817 && REG_P (SET_SRC (PATTERN (i3))) 2818 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER 2819 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3))) 2820 && GET_CODE (PATTERN (i2)) == PARALLEL 2821 && ! side_effects_p (SET_DEST (PATTERN (i3))) 2822 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code 2823 below would need to check what is inside (and reg_overlap_mentioned_p 2824 doesn't support those codes anyway). Don't allow those destinations; 2825 the resulting insn isn't likely to be recognized anyway. */ 2826 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT 2827 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART 2828 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)), 2829 SET_DEST (PATTERN (i3))) 2830 && next_active_insn (i2) == i3) 2831 { 2832 rtx p2 = PATTERN (i2); 2833 2834 /* Make sure that the destination of I3, 2835 which we are going to substitute into one output of I2, 2836 is not used within another output of I2. We must avoid making this: 2837 (parallel [(set (mem (reg 69)) ...) 2838 (set (reg 69) ...)]) 2839 which is not well-defined as to order of actions. 2840 (Besides, reload can't handle output reloads for this.) 2841 2842 The problem can also happen if the dest of I3 is a memory ref, 2843 if another dest in I2 is an indirect memory ref. 2844 2845 Neither can this PARALLEL be an asm. We do not allow combining 2846 that usually (see can_combine_p), so do not here either. */ 2847 bool ok = true; 2848 for (i = 0; ok && i < XVECLEN (p2, 0); i++) 2849 { 2850 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET 2851 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER) 2852 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)), 2853 SET_DEST (XVECEXP (p2, 0, i)))) 2854 ok = false; 2855 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET 2856 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS) 2857 ok = false; 2858 } 2859 2860 if (ok) 2861 for (i = 0; i < XVECLEN (p2, 0); i++) 2862 if (GET_CODE (XVECEXP (p2, 0, i)) == SET 2863 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3))) 2864 { 2865 combine_merges++; 2866 2867 subst_insn = i3; 2868 subst_low_luid = DF_INSN_LUID (i2); 2869 2870 added_sets_2 = added_sets_1 = added_sets_0 = 0; 2871 i2src = SET_SRC (XVECEXP (p2, 0, i)); 2872 i2dest = SET_DEST (XVECEXP (p2, 0, i)); 2873 i2dest_killed = dead_or_set_p (i2, i2dest); 2874 2875 /* Replace the dest in I2 with our dest and make the resulting 2876 insn the new pattern for I3. Then skip to where we validate 2877 the pattern. Everything was set up above. */ 2878 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3))); 2879 newpat = p2; 2880 i3_subst_into_i2 = 1; 2881 goto validate_replacement; 2882 } 2883 } 2884 2885 /* If I2 is setting a pseudo to a constant and I3 is setting some 2886 sub-part of it to another constant, merge them by making a new 2887 constant. 
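     For instance (a hypothetical little-endian example): if I2 is
	(set (reg:SI 100) (const_int 0x12345678))
     and I3 is
	(set (subreg:HI (reg:SI 100) 0) (const_int 0x2BCD))
     then wi::insert below computes 0x12342BCD and the two insns are
     merged into a single (set (reg:SI 100) (const_int 0x12342BCD)).  */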
*/ 2888 if (i1 == 0 2889 && (temp_expr = single_set (i2)) != 0 2890 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode) 2891 && CONST_SCALAR_INT_P (SET_SRC (temp_expr)) 2892 && GET_CODE (PATTERN (i3)) == SET 2893 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3))) 2894 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr))) 2895 { 2896 rtx dest = SET_DEST (PATTERN (i3)); 2897 rtx temp_dest = SET_DEST (temp_expr); 2898 int offset = -1; 2899 int width = 0; 2900 2901 if (GET_CODE (dest) == ZERO_EXTRACT) 2902 { 2903 if (CONST_INT_P (XEXP (dest, 1)) 2904 && CONST_INT_P (XEXP (dest, 2)) 2905 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)), 2906 &dest_mode)) 2907 { 2908 width = INTVAL (XEXP (dest, 1)); 2909 offset = INTVAL (XEXP (dest, 2)); 2910 dest = XEXP (dest, 0); 2911 if (BITS_BIG_ENDIAN) 2912 offset = GET_MODE_PRECISION (dest_mode) - width - offset; 2913 } 2914 } 2915 else 2916 { 2917 if (GET_CODE (dest) == STRICT_LOW_PART) 2918 dest = XEXP (dest, 0); 2919 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode)) 2920 { 2921 width = GET_MODE_PRECISION (dest_mode); 2922 offset = 0; 2923 } 2924 } 2925 2926 if (offset >= 0) 2927 { 2928 /* If this is the low part, we're done. */ 2929 if (subreg_lowpart_p (dest)) 2930 ; 2931 /* Handle the case where inner is twice the size of outer. */ 2932 else if (GET_MODE_PRECISION (temp_mode) 2933 == 2 * GET_MODE_PRECISION (dest_mode)) 2934 offset += GET_MODE_PRECISION (dest_mode); 2935 /* Otherwise give up for now. */ 2936 else 2937 offset = -1; 2938 } 2939 2940 if (offset >= 0) 2941 { 2942 rtx inner = SET_SRC (PATTERN (i3)); 2943 rtx outer = SET_SRC (temp_expr); 2944 2945 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode), 2946 rtx_mode_t (inner, dest_mode), 2947 offset, width); 2948 2949 combine_merges++; 2950 subst_insn = i3; 2951 subst_low_luid = DF_INSN_LUID (i2); 2952 added_sets_2 = added_sets_1 = added_sets_0 = 0; 2953 i2dest = temp_dest; 2954 i2dest_killed = dead_or_set_p (i2, i2dest); 2955 2956 /* Replace the source in I2 with the new constant and make the 2957 resulting insn the new pattern for I3. Then skip to where we 2958 validate the pattern. Everything was set up above. */ 2959 SUBST (SET_SRC (temp_expr), 2960 immed_wide_int_const (o, temp_mode)); 2961 2962 newpat = PATTERN (i2); 2963 2964 /* The dest of I3 has been replaced with the dest of I2. */ 2965 changed_i3_dest = 1; 2966 goto validate_replacement; 2967 } 2968 } 2969 2970 /* If we have no I1 and I2 looks like: 2971 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0))) 2972 (set Y OP)]) 2973 make up a dummy I1 that is 2974 (set Y OP) 2975 and change I2 to be 2976 (set (reg:CC X) (compare:CC Y (const_int 0))) 2977 2978 (We can ignore any trailing CLOBBERs.) 2979 2980 This undoes a previous combination and allows us to match a branch-and- 2981 decrement insn. */ 2982 2983 if (!HAVE_cc0 && i1 == 0 2984 && is_parallel_of_n_reg_sets (PATTERN (i2), 2) 2985 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)))) 2986 == MODE_CC) 2987 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE 2988 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx 2989 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0), 2990 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))) 2991 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) 2992 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)) 2993 { 2994 /* We make I1 with the same INSN_UID as I2. 
This gives it 2995 the same DF_INSN_LUID for value tracking. Our fake I1 will 2996 never appear in the insn stream so giving it the same INSN_UID 2997 as I2 will not cause a problem. */ 2998 2999 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2), 3000 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2), 3001 -1, NULL_RTX); 3002 INSN_UID (i1) = INSN_UID (i2); 3003 3004 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0)); 3005 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0), 3006 SET_DEST (PATTERN (i1))); 3007 unsigned int regno = REGNO (SET_DEST (PATTERN (i1))); 3008 SUBST_LINK (LOG_LINKS (i2), 3009 alloc_insn_link (i1, regno, LOG_LINKS (i2))); 3010 } 3011 3012 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs), 3013 make those two SETs separate I1 and I2 insns, and make an I0 that is 3014 the original I1. */ 3015 if (!HAVE_cc0 && i0 == 0 3016 && is_parallel_of_n_reg_sets (PATTERN (i2), 2) 3017 && can_split_parallel_of_n_reg_sets (i2, 2) 3018 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) 3019 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3) 3020 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) 3021 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)) 3022 { 3023 /* If there is no I1, there is no I0 either. */ 3024 i0 = i1; 3025 3026 /* We make I1 with the same INSN_UID as I2. This gives it 3027 the same DF_INSN_LUID for value tracking. Our fake I1 will 3028 never appear in the insn stream so giving it the same INSN_UID 3029 as I2 will not cause a problem. */ 3030 3031 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2), 3032 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2), 3033 -1, NULL_RTX); 3034 INSN_UID (i1) = INSN_UID (i2); 3035 3036 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1)); 3037 } 3038 3039 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */ 3040 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)) 3041 { 3042 if (dump_file) 3043 fprintf (dump_file, "Can't combine i2 into i3\n"); 3044 undo_all (); 3045 return 0; 3046 } 3047 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src)) 3048 { 3049 if (dump_file) 3050 fprintf (dump_file, "Can't combine i1 into i3\n"); 3051 undo_all (); 3052 return 0; 3053 } 3054 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src)) 3055 { 3056 if (dump_file) 3057 fprintf (dump_file, "Can't combine i0 into i3\n"); 3058 undo_all (); 3059 return 0; 3060 } 3061 3062 /* Record whether I2DEST is used in I2SRC and similarly for the other 3063 cases. Knowing this will help in register status updating below. */ 3064 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src); 3065 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src); 3066 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src); 3067 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src); 3068 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src); 3069 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src); 3070 i2dest_killed = dead_or_set_p (i2, i2dest); 3071 i1dest_killed = i1 && dead_or_set_p (i1, i1dest); 3072 i0dest_killed = i0 && dead_or_set_p (i0, i0dest); 3073 3074 /* For the earlier insns, determine which of the subsequent ones they 3075 feed. */ 3076 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2); 3077 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1); 3078 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? 
insn_a_feeds_b (i0, i2) 3079 : (!reg_overlap_mentioned_p (i1dest, i0dest) 3080 && reg_overlap_mentioned_p (i0dest, i2src)))); 3081 3082 /* Ensure that I3's pattern can be the destination of combines. */ 3083 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest, 3084 i1 && i2dest_in_i1src && !i1_feeds_i2_n, 3085 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n) 3086 || (i1dest_in_i0src && !i0_feeds_i1_n)), 3087 &i3dest_killed)) 3088 { 3089 undo_all (); 3090 return 0; 3091 } 3092 3093 /* See if any of the insns is a MULT operation. Unless one is, we will 3094 reject a combination that is, since it must be slower. Be conservative 3095 here. */ 3096 if (GET_CODE (i2src) == MULT 3097 || (i1 != 0 && GET_CODE (i1src) == MULT) 3098 || (i0 != 0 && GET_CODE (i0src) == MULT) 3099 || (GET_CODE (PATTERN (i3)) == SET 3100 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT)) 3101 have_mult = 1; 3102 3103 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd. 3104 We used to do this EXCEPT in one case: I3 has a post-inc in an 3105 output operand. However, that exception can give rise to insns like 3106 mov r3,(r3)+ 3107 which is a famous insn on the PDP-11 where the value of r3 used as the 3108 source was model-dependent. Avoid this sort of thing. */ 3109 3110 #if 0 3111 if (!(GET_CODE (PATTERN (i3)) == SET 3112 && REG_P (SET_SRC (PATTERN (i3))) 3113 && MEM_P (SET_DEST (PATTERN (i3))) 3114 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC 3115 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC))) 3116 /* It's not the exception. */ 3117 #endif 3118 if (AUTO_INC_DEC) 3119 { 3120 rtx link; 3121 for (link = REG_NOTES (i3); link; link = XEXP (link, 1)) 3122 if (REG_NOTE_KIND (link) == REG_INC 3123 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2)) 3124 || (i1 != 0 3125 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1))))) 3126 { 3127 undo_all (); 3128 return 0; 3129 } 3130 } 3131 3132 /* See if the SETs in I1 or I2 need to be kept around in the merged 3133 instruction: whenever the value set there is still needed past I3. 3134 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3. 3135 3136 For the SET in I1, we have two cases: if I1 and I2 independently feed 3137 into I3, the set in I1 needs to be kept around unless I1DEST dies 3138 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set 3139 in I1 needs to be kept around unless I1DEST dies or is set in either 3140 I2 or I3. The same considerations apply to I0. */ 3141 3142 added_sets_2 = !dead_or_set_p (i3, i2dest); 3143 3144 if (i1) 3145 added_sets_1 = !(dead_or_set_p (i3, i1dest) 3146 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest))); 3147 else 3148 added_sets_1 = 0; 3149 3150 if (i0) 3151 added_sets_0 = !(dead_or_set_p (i3, i0dest) 3152 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)) 3153 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) 3154 && dead_or_set_p (i2, i0dest))); 3155 else 3156 added_sets_0 = 0; 3157 3158 /* We are about to copy insns for the case where they need to be kept 3159 around. Check that they can be copied in the merged instruction. 
*/ 3160 3161 if (targetm.cannot_copy_insn_p 3162 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2)) 3163 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1)) 3164 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0)))) 3165 { 3166 undo_all (); 3167 return 0; 3168 } 3169 3170 /* If the set in I2 needs to be kept around, we must make a copy of 3171 PATTERN (I2), so that when we substitute I1SRC for I1DEST in 3172 PATTERN (I2), we are only substituting for the original I1DEST, not into 3173 an already-substituted copy. This also prevents making self-referential 3174 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to 3175 I2DEST. */ 3176 3177 if (added_sets_2) 3178 { 3179 if (GET_CODE (PATTERN (i2)) == PARALLEL) 3180 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src)); 3181 else 3182 i2pat = copy_rtx (PATTERN (i2)); 3183 } 3184 3185 if (added_sets_1) 3186 { 3187 if (GET_CODE (PATTERN (i1)) == PARALLEL) 3188 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src)); 3189 else 3190 i1pat = copy_rtx (PATTERN (i1)); 3191 } 3192 3193 if (added_sets_0) 3194 { 3195 if (GET_CODE (PATTERN (i0)) == PARALLEL) 3196 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src)); 3197 else 3198 i0pat = copy_rtx (PATTERN (i0)); 3199 } 3200 3201 combine_merges++; 3202 3203 /* Substitute in the latest insn for the regs set by the earlier ones. */ 3204 3205 maxreg = max_reg_num (); 3206 3207 subst_insn = i3; 3208 3209 /* Many machines that don't use CC0 have insns that can both perform an 3210 arithmetic operation and set the condition code. These operations will 3211 be represented as a PARALLEL with the first element of the vector 3212 being a COMPARE of an arithmetic operation with the constant zero. 3213 The second element of the vector will set some pseudo to the result 3214 of the same arithmetic operation. If we simplify the COMPARE, we won't 3215 match such a pattern and so will generate an extra insn. Here we test 3216 for this case, where both the comparison and the operation result are 3217 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with 3218 I2SRC. Later we will make the PARALLEL that contains I2. */ 3219 3220 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET 3221 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE 3222 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1)) 3223 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest)) 3224 { 3225 rtx newpat_dest; 3226 rtx *cc_use_loc = NULL; 3227 rtx_insn *cc_use_insn = NULL; 3228 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1); 3229 machine_mode compare_mode, orig_compare_mode; 3230 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN; 3231 scalar_int_mode mode; 3232 3233 newpat = PATTERN (i3); 3234 newpat_dest = SET_DEST (newpat); 3235 compare_mode = orig_compare_mode = GET_MODE (newpat_dest); 3236 3237 if (undobuf.other_insn == 0 3238 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3, 3239 &cc_use_insn))) 3240 { 3241 compare_code = orig_compare_code = GET_CODE (*cc_use_loc); 3242 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode)) 3243 compare_code = simplify_compare_const (compare_code, mode, 3244 op0, &op1); 3245 target_canonicalize_comparison (&compare_code, &op0, &op1, 1); 3246 } 3247 3248 /* Do the rest only if op1 is const0_rtx, which may be the 3249 result of simplification. 
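	 (A sketch of the shape handled here, with invented registers:
	  I2 is (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int -1)))
	  and I3 is (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)));
	  the code below rewrites I3's source to compare the PLUS directly,
	  and the PARALLEL built later re-adds I2's set when it is still
	  needed.)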
*/ 3250 if (op1 == const0_rtx) 3251 { 3252 /* If a single use of the CC is found, prepare to modify it 3253 when SELECT_CC_MODE returns a new CC-class mode, or when 3254 the above simplify_compare_const() returned a new comparison 3255 operator. undobuf.other_insn is assigned the CC use insn 3256 when modifying it. */ 3257 if (cc_use_loc) 3258 { 3259 #ifdef SELECT_CC_MODE 3260 machine_mode new_mode 3261 = SELECT_CC_MODE (compare_code, op0, op1); 3262 if (new_mode != orig_compare_mode 3263 && can_change_dest_mode (SET_DEST (newpat), 3264 added_sets_2, new_mode)) 3265 { 3266 unsigned int regno = REGNO (newpat_dest); 3267 compare_mode = new_mode; 3268 if (regno < FIRST_PSEUDO_REGISTER) 3269 newpat_dest = gen_rtx_REG (compare_mode, regno); 3270 else 3271 { 3272 SUBST_MODE (regno_reg_rtx[regno], compare_mode); 3273 newpat_dest = regno_reg_rtx[regno]; 3274 } 3275 } 3276 #endif 3277 /* Cases for modifying the CC-using comparison. */ 3278 if (compare_code != orig_compare_code 3279 /* ??? Do we need to verify the zero rtx? */ 3280 && XEXP (*cc_use_loc, 1) == const0_rtx) 3281 { 3282 /* Replace cc_use_loc with entire new RTX. */ 3283 SUBST (*cc_use_loc, 3284 gen_rtx_fmt_ee (compare_code, compare_mode, 3285 newpat_dest, const0_rtx)); 3286 undobuf.other_insn = cc_use_insn; 3287 } 3288 else if (compare_mode != orig_compare_mode) 3289 { 3290 /* Just replace the CC reg with a new mode. */ 3291 SUBST (XEXP (*cc_use_loc, 0), newpat_dest); 3292 undobuf.other_insn = cc_use_insn; 3293 } 3294 } 3295 3296 /* Now we modify the current newpat: 3297 First, SET_DEST(newpat) is updated if the CC mode has been 3298 altered. For targets without SELECT_CC_MODE, this should be 3299 optimized away. */ 3300 if (compare_mode != orig_compare_mode) 3301 SUBST (SET_DEST (newpat), newpat_dest); 3302 /* This is always done to propagate i2src into newpat. */ 3303 SUBST (SET_SRC (newpat), 3304 gen_rtx_COMPARE (compare_mode, op0, op1)); 3305 /* Create new version of i2pat if needed; the below PARALLEL 3306 creation needs this to work correctly. */ 3307 if (! rtx_equal_p (i2src, op0)) 3308 i2pat = gen_rtx_SET (i2dest, op0); 3309 i2_is_used = 1; 3310 } 3311 } 3312 3313 if (i2_is_used == 0) 3314 { 3315 /* It is possible that the source of I2 or I1 may be performing 3316 an unneeded operation, such as a ZERO_EXTEND of something 3317 that is known to have the high part zero. Handle that case 3318 by letting subst look at the inner insns. 3319 3320 Another way to do this would be to have a function that tries 3321 to simplify a single insn instead of merging two or more 3322 insns. We don't do this because of the potential of infinite 3323 loops and because of the potential extra memory required. 3324 However, doing it the way we are is a bit of a kludge and 3325 doesn't catch all cases. 3326 3327 But only do this if -fexpensive-optimizations since it slows 3328 things down and doesn't usually win. 3329 3330 This is not done in the COMPARE case above because the 3331 unmodified I2PAT is used in the PARALLEL and so a pattern 3332 with a modified I2SRC would not match. */ 3333 3334 if (flag_expensive_optimizations) 3335 { 3336 /* Pass pc_rtx so no substitutions are done, just 3337 simplifications. 
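	     (Since FROM = pc_rtx never occurs inside I1SRC or I2SRC,
	      subst replaces nothing and merely re-runs its
	      simplifications; e.g. a ZERO_EXTEND whose operand is now
	      known to have a zero high part can be folded away.  A
	      sketch of the intent, not a guarantee.)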
*/ 3338 if (i1) 3339 { 3340 subst_low_luid = DF_INSN_LUID (i1); 3341 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0); 3342 } 3343 3344 subst_low_luid = DF_INSN_LUID (i2); 3345 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0); 3346 } 3347 3348 n_occurrences = 0; /* `subst' counts here */ 3349 subst_low_luid = DF_INSN_LUID (i2); 3350 3351 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique 3352 copy of I2SRC each time we substitute it, in order to avoid creating 3353 self-referential RTL when we will be substituting I1SRC for I1DEST 3354 later. Likewise if I0 feeds into I2, either directly or indirectly 3355 through I1, and I0DEST is in I0SRC. */ 3356 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0, 3357 (i1_feeds_i2_n && i1dest_in_i1src) 3358 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) 3359 && i0dest_in_i0src)); 3360 substed_i2 = 1; 3361 3362 /* Record whether I2's body now appears within I3's body. */ 3363 i2_is_used = n_occurrences; 3364 } 3365 3366 /* If we already got a failure, don't try to do more. Otherwise, try to 3367 substitute I1 if we have it. */ 3368 3369 if (i1 && GET_CODE (newpat) != CLOBBER) 3370 { 3371 /* Check that an autoincrement side-effect on I1 has not been lost. 3372 This happens if I1DEST is mentioned in I2 and dies there, and 3373 has disappeared from the new pattern. */ 3374 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0 3375 && i1_feeds_i2_n 3376 && dead_or_set_p (i2, i1dest) 3377 && !reg_overlap_mentioned_p (i1dest, newpat)) 3378 /* Before we can do this substitution, we must redo the test done 3379 above (see detailed comments there) that ensures I1DEST isn't 3380 mentioned in any SETs in NEWPAT that are field assignments. */ 3381 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX, 3382 0, 0, 0)) 3383 { 3384 undo_all (); 3385 return 0; 3386 } 3387 3388 n_occurrences = 0; 3389 subst_low_luid = DF_INSN_LUID (i1); 3390 3391 /* If the following substitution will modify I1SRC, make a copy of it 3392 for the case where it is substituted for I1DEST in I2PAT later. */ 3393 if (added_sets_2 && i1_feeds_i2_n) 3394 i1src_copy = copy_rtx (i1src); 3395 3396 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique 3397 copy of I1SRC each time we substitute it, in order to avoid creating 3398 self-referential RTL when we will be substituting I0SRC for I0DEST 3399 later. */ 3400 newpat = subst (newpat, i1dest, i1src, 0, 0, 3401 i0_feeds_i1_n && i0dest_in_i0src); 3402 substed_i1 = 1; 3403 3404 /* Record whether I1's body now appears within I3's body. */ 3405 i1_is_used = n_occurrences; 3406 } 3407 3408 /* Likewise for I0 if we have it. */ 3409 3410 if (i0 && GET_CODE (newpat) != CLOBBER) 3411 { 3412 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0 3413 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest)) 3414 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))) 3415 && !reg_overlap_mentioned_p (i0dest, newpat)) 3416 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX, 3417 0, 0, 0)) 3418 { 3419 undo_all (); 3420 return 0; 3421 } 3422 3423 /* If the following substitution will modify I0SRC, make a copy of it 3424 for the case where it is substituted for I0DEST in I1PAT later. */ 3425 if (added_sets_1 && i0_feeds_i1_n) 3426 i0src_copy = copy_rtx (i0src); 3427 /* And a copy for I0DEST in I2PAT substitution. 
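The copies keep each substitution site distinct: if, say, I0DEST were
(reg 70) and I0SRC were (plus (reg 70) (const_int 1)), substituting a
shared I0SRC rtx into itself would create self-referential RTL. The
register number is illustrative.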
*/
3428 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3429 || (i0_feeds_i2_n)))
3430 i0src_copy2 = copy_rtx (i0src);
3431
3432 n_occurrences = 0;
3433 subst_low_luid = DF_INSN_LUID (i0);
3434 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3435 substed_i0 = 1;
3436 }
3437
3438 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3439 to count all the ways that I2SRC and I1SRC can be used. */
3440 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3441 && i2_is_used + added_sets_2 > 1)
3442 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3443 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3444 > 1))
3445 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3446 && (n_occurrences + added_sets_0
3447 + (added_sets_1 && i0_feeds_i1_n)
3448 + (added_sets_2 && i0_feeds_i2_n)
3449 > 1))
3450 /* Fail if we tried to make a new register. */
3451 || max_reg_num () != maxreg
3452 /* Fail if we couldn't do something and have a CLOBBER. */
3453 || GET_CODE (newpat) == CLOBBER
3454 /* Fail if this new pattern is a MULT and we didn't have one before
3455 at the outer level. */
3456 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3457 && ! have_mult))
3458 {
3459 undo_all ();
3460 return 0;
3461 }
3462
3463 /* If the actions of the earlier insns must be kept
3464 in addition to substituting them into the latest one,
3465 we must make a new PARALLEL for the latest insn
3466 to hold the additional SETs. */
3467
3468 if (added_sets_0 || added_sets_1 || added_sets_2)
3469 {
3470 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3471 combine_extras++;
3472
3473 if (GET_CODE (newpat) == PARALLEL)
3474 {
3475 rtvec old = XVEC (newpat, 0);
3476 total_sets = XVECLEN (newpat, 0) + extra_sets;
3477 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3478 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3479 sizeof (old->elem[0]) * old->num_elem);
3480 }
3481 else
3482 {
3483 rtx old = newpat;
3484 total_sets = 1 + extra_sets;
3485 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3486 XVECEXP (newpat, 0, 0) = old;
3487 }
3488
3489 if (added_sets_0)
3490 XVECEXP (newpat, 0, --total_sets) = i0pat;
3491
3492 if (added_sets_1)
3493 {
3494 rtx t = i1pat;
3495 if (i0_feeds_i1_n)
3496 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3497
3498 XVECEXP (newpat, 0, --total_sets) = t;
3499 }
3500 if (added_sets_2)
3501 {
3502 rtx t = i2pat;
3503 if (i1_feeds_i2_n)
3504 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3505 i0_feeds_i1_n && i0dest_in_i0src);
3506 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3507 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3508
3509 XVECEXP (newpat, 0, --total_sets) = t;
3510 }
3511 }
3512
3513 validate_replacement:
3514
3515 /* Note which hard regs this insn has as inputs. */
3516 mark_used_regs_combine (newpat);
3517
3518 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3519 consider splitting this pattern, we might need these clobbers. */
3520 if (i1 && GET_CODE (newpat) == PARALLEL
3521 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3522 {
3523 int len = XVECLEN (newpat, 0);
3524
3525 newpat_vec_with_clobbers = rtvec_alloc (len);
3526 for (i = 0; i < len; i++)
3527 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3528 }
3529
3530 /* We have recognized nothing yet.
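Below, a negative insn_code_number means the pattern is not (yet)
recognized; a value >= 0 is an insn code from the machine description,
as returned by recog.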
*/ 3531 insn_code_number = -1; 3532 3533 /* See if this is a PARALLEL of two SETs where one SET's destination is 3534 a register that is unused and this isn't marked as an instruction that 3535 might trap in an EH region. In that case, we just need the other SET. 3536 We prefer this over the PARALLEL. 3537 3538 This can occur when simplifying a divmod insn. We *must* test for this 3539 case here because the code below that splits two independent SETs doesn't 3540 handle this case correctly when it updates the register status. 3541 3542 It's pointless doing this if we originally had two sets, one from 3543 i3, and one from i2. Combining then splitting the parallel results 3544 in the original i2 again plus an invalid insn (which we delete). 3545 The net effect is only to move instructions around, which makes 3546 debug info less accurate. 3547 3548 If the remaining SET came from I2 its destination should not be used 3549 between I2 and I3. See PR82024. */ 3550 3551 if (!(added_sets_2 && i1 == 0) 3552 && is_parallel_of_n_reg_sets (newpat, 2) 3553 && asm_noperands (newpat) < 0) 3554 { 3555 rtx set0 = XVECEXP (newpat, 0, 0); 3556 rtx set1 = XVECEXP (newpat, 0, 1); 3557 rtx oldpat = newpat; 3558 3559 if (((REG_P (SET_DEST (set1)) 3560 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1))) 3561 || (GET_CODE (SET_DEST (set1)) == SUBREG 3562 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1))))) 3563 && insn_nothrow_p (i3) 3564 && !side_effects_p (SET_SRC (set1))) 3565 { 3566 newpat = set0; 3567 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 3568 } 3569 3570 else if (((REG_P (SET_DEST (set0)) 3571 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0))) 3572 || (GET_CODE (SET_DEST (set0)) == SUBREG 3573 && find_reg_note (i3, REG_UNUSED, 3574 SUBREG_REG (SET_DEST (set0))))) 3575 && insn_nothrow_p (i3) 3576 && !side_effects_p (SET_SRC (set0))) 3577 { 3578 rtx dest = SET_DEST (set1); 3579 if (GET_CODE (dest) == SUBREG) 3580 dest = SUBREG_REG (dest); 3581 if (!reg_used_between_p (dest, i2, i3)) 3582 { 3583 newpat = set1; 3584 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 3585 3586 if (insn_code_number >= 0) 3587 changed_i3_dest = 1; 3588 } 3589 } 3590 3591 if (insn_code_number < 0) 3592 newpat = oldpat; 3593 } 3594 3595 /* Is the result of combination a valid instruction? */ 3596 if (insn_code_number < 0) 3597 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 3598 3599 /* If we were combining three insns and the result is a simple SET 3600 with no ASM_OPERANDS that wasn't recognized, try to split it into two 3601 insns. There are two ways to do this. It can be split using a 3602 machine-specific method (like when you have an addition of a large 3603 constant) or by combine in the function find_split_point. */ 3604 3605 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET 3606 && asm_noperands (newpat) < 0) 3607 { 3608 rtx parallel, *split; 3609 rtx_insn *m_split_insn; 3610 3611 /* See if the MD file can split NEWPAT. If it can't, see if letting it 3612 use I2DEST as a scratch register will help. In the latter case, 3613 convert I2DEST to the mode of the source of NEWPAT if we can. */ 3614 3615 m_split_insn = combine_split_insns (newpat, i3); 3616 3617 /* We can only use I2DEST as a scratch reg if it doesn't overlap any 3618 inputs of NEWPAT. */ 3619 3620 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be 3621 possible to try that as a scratch reg. This would require adding 3622 more code to make it work though. 
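As a schematic illustration only: a target splitter for a move of a
wide constant might use the clobbered register as the intermediate,
e.g. first setting the scratch to the high part of the constant and
then IORing in the low part. Actual splitters are target-specific.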
*/ 3623 3624 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat)) 3625 { 3626 machine_mode new_mode = GET_MODE (SET_DEST (newpat)); 3627 3628 /* ??? Reusing i2dest without resetting the reg_stat entry for it 3629 (temporarily, until we are committed to this instruction 3630 combination) does not work: for example, any call to nonzero_bits 3631 on the register (from a splitter in the MD file, for example) 3632 will get the old information, which is invalid. 3633 3634 Since nowadays we can create registers during combine just fine, 3635 we should just create a new one here, not reuse i2dest. */ 3636 3637 /* First try to split using the original register as a 3638 scratch register. */ 3639 parallel = gen_rtx_PARALLEL (VOIDmode, 3640 gen_rtvec (2, newpat, 3641 gen_rtx_CLOBBER (VOIDmode, 3642 i2dest))); 3643 m_split_insn = combine_split_insns (parallel, i3); 3644 3645 /* If that didn't work, try changing the mode of I2DEST if 3646 we can. */ 3647 if (m_split_insn == 0 3648 && new_mode != GET_MODE (i2dest) 3649 && new_mode != VOIDmode 3650 && can_change_dest_mode (i2dest, added_sets_2, new_mode)) 3651 { 3652 machine_mode old_mode = GET_MODE (i2dest); 3653 rtx ni2dest; 3654 3655 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER) 3656 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest)); 3657 else 3658 { 3659 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode); 3660 ni2dest = regno_reg_rtx[REGNO (i2dest)]; 3661 } 3662 3663 parallel = (gen_rtx_PARALLEL 3664 (VOIDmode, 3665 gen_rtvec (2, newpat, 3666 gen_rtx_CLOBBER (VOIDmode, 3667 ni2dest)))); 3668 m_split_insn = combine_split_insns (parallel, i3); 3669 3670 if (m_split_insn == 0 3671 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER) 3672 { 3673 struct undo *buf; 3674 3675 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode); 3676 buf = undobuf.undos; 3677 undobuf.undos = buf->next; 3678 buf->next = undobuf.frees; 3679 undobuf.frees = buf; 3680 } 3681 } 3682 3683 i2scratch = m_split_insn != 0; 3684 } 3685 3686 /* If recog_for_combine has discarded clobbers, try to use them 3687 again for the split. */ 3688 if (m_split_insn == 0 && newpat_vec_with_clobbers) 3689 { 3690 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers); 3691 m_split_insn = combine_split_insns (parallel, i3); 3692 } 3693 3694 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX) 3695 { 3696 rtx m_split_pat = PATTERN (m_split_insn); 3697 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes); 3698 if (insn_code_number >= 0) 3699 newpat = m_split_pat; 3700 } 3701 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX 3702 && (next_nonnote_nondebug_insn (i2) == i3 3703 || !modified_between_p (PATTERN (m_split_insn), i2, i3))) 3704 { 3705 rtx i2set, i3set; 3706 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn)); 3707 newi2pat = PATTERN (m_split_insn); 3708 3709 i3set = single_set (NEXT_INSN (m_split_insn)); 3710 i2set = single_set (m_split_insn); 3711 3712 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); 3713 3714 /* If I2 or I3 has multiple SETs, we won't know how to track 3715 register status, so don't use these insns. If I2's destination 3716 is used between I2 and I3, we also can't use these insns. */ 3717 3718 if (i2_code_number >= 0 && i2set && i3set 3719 && (next_nonnote_nondebug_insn (i2) == i3 3720 || ! 
reg_used_between_p (SET_DEST (i2set), i2, i3))) 3721 insn_code_number = recog_for_combine (&newi3pat, i3, 3722 &new_i3_notes); 3723 if (insn_code_number >= 0) 3724 newpat = newi3pat; 3725 3726 /* It is possible that both insns now set the destination of I3. 3727 If so, we must show an extra use of it. */ 3728 3729 if (insn_code_number >= 0) 3730 { 3731 rtx new_i3_dest = SET_DEST (i3set); 3732 rtx new_i2_dest = SET_DEST (i2set); 3733 3734 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT 3735 || GET_CODE (new_i3_dest) == STRICT_LOW_PART 3736 || GET_CODE (new_i3_dest) == SUBREG) 3737 new_i3_dest = XEXP (new_i3_dest, 0); 3738 3739 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT 3740 || GET_CODE (new_i2_dest) == STRICT_LOW_PART 3741 || GET_CODE (new_i2_dest) == SUBREG) 3742 new_i2_dest = XEXP (new_i2_dest, 0); 3743 3744 if (REG_P (new_i3_dest) 3745 && REG_P (new_i2_dest) 3746 && REGNO (new_i3_dest) == REGNO (new_i2_dest) 3747 && REGNO (new_i2_dest) < reg_n_sets_max) 3748 INC_REG_N_SETS (REGNO (new_i2_dest), 1); 3749 } 3750 } 3751 3752 /* If we can split it and use I2DEST, go ahead and see if that 3753 helps things be recognized. Verify that none of the registers 3754 are set between I2 and I3. */ 3755 if (insn_code_number < 0 3756 && (split = find_split_point (&newpat, i3, false)) != 0 3757 && (!HAVE_cc0 || REG_P (i2dest)) 3758 /* We need I2DEST in the proper mode. If it is a hard register 3759 or the only use of a pseudo, we can change its mode. 3760 Make sure we don't change a hard register to have a mode that 3761 isn't valid for it, or change the number of registers. */ 3762 && (GET_MODE (*split) == GET_MODE (i2dest) 3763 || GET_MODE (*split) == VOIDmode 3764 || can_change_dest_mode (i2dest, added_sets_2, 3765 GET_MODE (*split))) 3766 && (next_nonnote_nondebug_insn (i2) == i3 3767 || !modified_between_p (*split, i2, i3)) 3768 /* We can't overwrite I2DEST if its value is still used by 3769 NEWPAT. */ 3770 && ! reg_referenced_p (i2dest, newpat)) 3771 { 3772 rtx newdest = i2dest; 3773 enum rtx_code split_code = GET_CODE (*split); 3774 machine_mode split_mode = GET_MODE (*split); 3775 bool subst_done = false; 3776 newi2pat = NULL_RTX; 3777 3778 i2scratch = true; 3779 3780 /* *SPLIT may be part of I2SRC, so make sure we have the 3781 original expression around for later debug processing. 3782 We should not need I2SRC any more in other cases. */ 3783 if (MAY_HAVE_DEBUG_BIND_INSNS) 3784 i2src = copy_rtx (i2src); 3785 else 3786 i2src = NULL; 3787 3788 /* Get NEWDEST as a register in the proper mode. We have already 3789 validated that we can do this. */ 3790 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode) 3791 { 3792 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER) 3793 newdest = gen_rtx_REG (split_mode, REGNO (i2dest)); 3794 else 3795 { 3796 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode); 3797 newdest = regno_reg_rtx[REGNO (i2dest)]; 3798 } 3799 } 3800 3801 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to 3802 an ASHIFT. This can occur if it was inside a PLUS and hence 3803 appeared to be a memory address. This is a kludge. */ 3804 if (split_code == MULT 3805 && CONST_INT_P (XEXP (*split, 1)) 3806 && INTVAL (XEXP (*split, 1)) > 0 3807 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0) 3808 { 3809 rtx i_rtx = gen_int_shift_amount (split_mode, i); 3810 SUBST (*split, gen_rtx_ASHIFT (split_mode, 3811 XEXP (*split, 0), i_rtx)); 3812 /* Update split_code because we may not have a multiply 3813 anymore. 
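For example, (mult (reg A) (const_int 8)) has just become
(ashift (reg A) (const_int 3)), so split_code changes from MULT to
ASHIFT; the register is schematic.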
*/
3814 split_code = GET_CODE (*split);
3815 }
3816
3817 /* Similarly for (plus (mult FOO (const_int pow2))). */
3818 if (split_code == PLUS
3819 && GET_CODE (XEXP (*split, 0)) == MULT
3820 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3821 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3822 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3823 {
3824 rtx nsplit = XEXP (*split, 0);
3825 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3826 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3827 XEXP (nsplit, 0),
3828 i_rtx));
3829 /* Update split_code because we may not have a multiply
3830 anymore. */
3831 split_code = GET_CODE (*split);
3832 }
3833
3834 #ifdef INSN_SCHEDULING
3835 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3836 be written as a ZERO_EXTEND. */
3837 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3838 {
3839 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3840 what it really is. */
3841 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3842 == SIGN_EXTEND)
3843 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3844 SUBREG_REG (*split)));
3845 else
3846 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3847 SUBREG_REG (*split)));
3848 }
3849 #endif
3850
3851 /* Attempt to split binary operators using arithmetic identities. */
3852 if (BINARY_P (SET_SRC (newpat))
3853 && split_mode == GET_MODE (SET_SRC (newpat))
3854 && ! side_effects_p (SET_SRC (newpat)))
3855 {
3856 rtx setsrc = SET_SRC (newpat);
3857 machine_mode mode = GET_MODE (setsrc);
3858 enum rtx_code code = GET_CODE (setsrc);
3859 rtx src_op0 = XEXP (setsrc, 0);
3860 rtx src_op1 = XEXP (setsrc, 1);
3861
3862 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3863 if (rtx_equal_p (src_op0, src_op1))
3864 {
3865 newi2pat = gen_rtx_SET (newdest, src_op0);
3866 SUBST (XEXP (setsrc, 0), newdest);
3867 SUBST (XEXP (setsrc, 1), newdest);
3868 subst_done = true;
3869 }
3870 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3871 else if ((code == PLUS || code == MULT)
3872 && GET_CODE (src_op0) == code
3873 && GET_CODE (XEXP (src_op0, 0)) == code
3874 && (INTEGRAL_MODE_P (mode)
3875 || (FLOAT_MODE_P (mode)
3876 && flag_unsafe_math_optimizations)))
3877 {
3878 rtx p = XEXP (XEXP (src_op0, 0), 0);
3879 rtx q = XEXP (XEXP (src_op0, 0), 1);
3880 rtx r = XEXP (src_op0, 1);
3881 rtx s = src_op1;
3882
3883 /* Split both "((X op Y) op X) op Y" and
3884 "((X op Y) op Y) op X" as "T op T" where T is
3885 "X op Y". */
3886 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3887 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3888 {
3889 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3890 SUBST (XEXP (setsrc, 0), newdest);
3891 SUBST (XEXP (setsrc, 1), newdest);
3892 subst_done = true;
3893 }
3894 /* Split "((X op X) op Y) op Y" as "T op T" where
3895 T is "X op Y". */
3896 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3897 {
3898 rtx tmp = simplify_gen_binary (code, mode, p, r);
3899 newi2pat = gen_rtx_SET (newdest, tmp);
3900 SUBST (XEXP (setsrc, 0), newdest);
3901 SUBST (XEXP (setsrc, 1), newdest);
3902 subst_done = true;
3903 }
3904 }
3905 }
3906
3907 if (!subst_done)
3908 {
3909 newi2pat = gen_rtx_SET (newdest, *split);
3910 SUBST (*split, newdest);
3911 }
3912
3913 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3914
3915 /* recog_for_combine might have added CLOBBERs to newi2pat.
3916 Make sure NEWPAT does not depend on the clobbered regs.
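E.g. on a target whose add clobbers the flags, newi2pat may now be
(parallel [(set ...) (clobber (reg:CC flags))]); if NEWPAT reads that
flags register, the combination is invalid. The pattern is schematic.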
*/ 3917 if (GET_CODE (newi2pat) == PARALLEL) 3918 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--) 3919 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER) 3920 { 3921 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0); 3922 if (reg_overlap_mentioned_p (reg, newpat)) 3923 { 3924 undo_all (); 3925 return 0; 3926 } 3927 } 3928 3929 /* If the split point was a MULT and we didn't have one before, 3930 don't use one now. */ 3931 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult)) 3932 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 3933 } 3934 } 3935 3936 /* Check for a case where we loaded from memory in a narrow mode and 3937 then sign extended it, but we need both registers. In that case, 3938 we have a PARALLEL with both loads from the same memory location. 3939 We can split this into a load from memory followed by a register-register 3940 copy. This saves at least one insn, more if register allocation can 3941 eliminate the copy. 3942 3943 We cannot do this if the destination of the first assignment is a 3944 condition code register or cc0. We eliminate this case by making sure 3945 the SET_DEST and SET_SRC have the same mode. 3946 3947 We cannot do this if the destination of the second assignment is 3948 a register that we have already assumed is zero-extended. Similarly 3949 for a SUBREG of such a register. */ 3950 3951 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0 3952 && GET_CODE (newpat) == PARALLEL 3953 && XVECLEN (newpat, 0) == 2 3954 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET 3955 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND 3956 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0))) 3957 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0)))) 3958 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET 3959 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)), 3960 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0)) 3961 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3) 3962 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT 3963 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART 3964 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)), 3965 (REG_P (temp_expr) 3966 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0 3967 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), 3968 BITS_PER_WORD) 3969 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), 3970 HOST_BITS_PER_INT) 3971 && (reg_stat[REGNO (temp_expr)].nonzero_bits 3972 != GET_MODE_MASK (word_mode)))) 3973 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG 3974 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))), 3975 (REG_P (temp_expr) 3976 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0 3977 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), 3978 BITS_PER_WORD) 3979 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), 3980 HOST_BITS_PER_INT) 3981 && (reg_stat[REGNO (temp_expr)].nonzero_bits 3982 != GET_MODE_MASK (word_mode))))) 3983 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)), 3984 SET_SRC (XVECEXP (newpat, 0, 1))) 3985 && ! 
find_reg_note (i3, REG_UNUSED, 3986 SET_DEST (XVECEXP (newpat, 0, 0)))) 3987 { 3988 rtx ni2dest; 3989 3990 newi2pat = XVECEXP (newpat, 0, 0); 3991 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0)); 3992 newpat = XVECEXP (newpat, 0, 1); 3993 SUBST (SET_SRC (newpat), 3994 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest)); 3995 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); 3996 3997 if (i2_code_number >= 0) 3998 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 3999 4000 if (insn_code_number >= 0) 4001 swap_i2i3 = 1; 4002 } 4003 4004 /* Similarly, check for a case where we have a PARALLEL of two independent 4005 SETs but we started with three insns. In this case, we can do the sets 4006 as two separate insns. This case occurs when some SET allows two 4007 other insns to combine, but the destination of that SET is still live. 4008 4009 Also do this if we started with two insns and (at least) one of the 4010 resulting sets is a noop; this noop will be deleted later. */ 4011 4012 else if (insn_code_number < 0 && asm_noperands (newpat) < 0 4013 && GET_CODE (newpat) == PARALLEL 4014 && XVECLEN (newpat, 0) == 2 4015 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET 4016 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET 4017 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0)) 4018 || set_noop_p (XVECEXP (newpat, 0, 1))) 4019 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT 4020 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART 4021 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT 4022 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART 4023 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)), 4024 XVECEXP (newpat, 0, 0)) 4025 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)), 4026 XVECEXP (newpat, 0, 1)) 4027 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0))) 4028 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1))))) 4029 { 4030 rtx set0 = XVECEXP (newpat, 0, 0); 4031 rtx set1 = XVECEXP (newpat, 0, 1); 4032 4033 /* Normally, it doesn't matter which of the two is done first, 4034 but the one that references cc0 can't be the second, and 4035 one which uses any regs/memory set in between i2 and i3 can't 4036 be first. The PARALLEL might also have been pre-existing in i3, 4037 so we need to make sure that we won't wrongly hoist a SET to i2 4038 that would conflict with a death note present in there. */ 4039 if (!modified_between_p (SET_SRC (set1), i2, i3) 4040 && !(REG_P (SET_DEST (set1)) 4041 && find_reg_note (i2, REG_DEAD, SET_DEST (set1))) 4042 && !(GET_CODE (SET_DEST (set1)) == SUBREG 4043 && find_reg_note (i2, REG_DEAD, 4044 SUBREG_REG (SET_DEST (set1)))) 4045 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0)) 4046 /* If I3 is a jump, ensure that set0 is a jump so that 4047 we do not create invalid RTL. */ 4048 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx) 4049 ) 4050 { 4051 newi2pat = set1; 4052 newpat = set0; 4053 } 4054 else if (!modified_between_p (SET_SRC (set0), i2, i3) 4055 && !(REG_P (SET_DEST (set0)) 4056 && find_reg_note (i2, REG_DEAD, SET_DEST (set0))) 4057 && !(GET_CODE (SET_DEST (set0)) == SUBREG 4058 && find_reg_note (i2, REG_DEAD, 4059 SUBREG_REG (SET_DEST (set0)))) 4060 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1)) 4061 /* If I3 is a jump, ensure that set1 is a jump so that 4062 we do not create invalid RTL. 
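In other words, the SET that stays in I3 must be the one whose
destination is the pc; hoisting a (set (pc) ...) into I2, a non-jump
insn, would be invalid.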
*/ 4063 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx) 4064 ) 4065 { 4066 newi2pat = set0; 4067 newpat = set1; 4068 } 4069 else 4070 { 4071 undo_all (); 4072 return 0; 4073 } 4074 4075 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); 4076 4077 if (i2_code_number >= 0) 4078 { 4079 /* recog_for_combine might have added CLOBBERs to newi2pat. 4080 Make sure NEWPAT does not depend on the clobbered regs. */ 4081 if (GET_CODE (newi2pat) == PARALLEL) 4082 { 4083 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--) 4084 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER) 4085 { 4086 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0); 4087 if (reg_overlap_mentioned_p (reg, newpat)) 4088 { 4089 undo_all (); 4090 return 0; 4091 } 4092 } 4093 } 4094 4095 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); 4096 4097 if (insn_code_number >= 0) 4098 split_i2i3 = 1; 4099 } 4100 } 4101 4102 /* If it still isn't recognized, fail and change things back the way they 4103 were. */ 4104 if ((insn_code_number < 0 4105 /* Is the result a reasonable ASM_OPERANDS? */ 4106 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2))) 4107 { 4108 undo_all (); 4109 return 0; 4110 } 4111 4112 /* If we had to change another insn, make sure it is valid also. */ 4113 if (undobuf.other_insn) 4114 { 4115 CLEAR_HARD_REG_SET (newpat_used_regs); 4116 4117 other_pat = PATTERN (undobuf.other_insn); 4118 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn, 4119 &new_other_notes); 4120 4121 if (other_code_number < 0 && ! check_asm_operands (other_pat)) 4122 { 4123 undo_all (); 4124 return 0; 4125 } 4126 } 4127 4128 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether 4129 they are adjacent to each other or not. */ 4130 if (HAVE_cc0) 4131 { 4132 rtx_insn *p = prev_nonnote_insn (i3); 4133 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat 4134 && sets_cc0_p (newi2pat)) 4135 { 4136 undo_all (); 4137 return 0; 4138 } 4139 } 4140 4141 /* Only allow this combination if insn_cost reports that the 4142 replacement instructions are cheaper than the originals. */ 4143 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat)) 4144 { 4145 undo_all (); 4146 return 0; 4147 } 4148 4149 if (MAY_HAVE_DEBUG_BIND_INSNS) 4150 { 4151 struct undo *undo; 4152 4153 for (undo = undobuf.undos; undo; undo = undo->next) 4154 if (undo->kind == UNDO_MODE) 4155 { 4156 rtx reg = *undo->where.r; 4157 machine_mode new_mode = GET_MODE (reg); 4158 machine_mode old_mode = undo->old_contents.m; 4159 4160 /* Temporarily revert mode back. */ 4161 adjust_reg_mode (reg, old_mode); 4162 4163 if (reg == i2dest && i2scratch) 4164 { 4165 /* If we used i2dest as a scratch register with a 4166 different mode, substitute it for the original 4167 i2src while its original mode is temporarily 4168 restored, and then clear i2scratch so that we don't 4169 do it again later. */ 4170 propagate_for_debug (i2, last_combined_insn, reg, i2src, 4171 this_basic_block); 4172 i2scratch = false; 4173 /* Put back the new mode. 
*/ 4174 adjust_reg_mode (reg, new_mode); 4175 } 4176 else 4177 { 4178 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg)); 4179 rtx_insn *first, *last; 4180 4181 if (reg == i2dest) 4182 { 4183 first = i2; 4184 last = last_combined_insn; 4185 } 4186 else 4187 { 4188 first = i3; 4189 last = undobuf.other_insn; 4190 gcc_assert (last); 4191 if (DF_INSN_LUID (last) 4192 < DF_INSN_LUID (last_combined_insn)) 4193 last = last_combined_insn; 4194 } 4195 4196 /* We're dealing with a reg that changed mode but not 4197 meaning, so we want to turn it into a subreg for 4198 the new mode. However, because of REG sharing and 4199 because its mode had already changed, we have to do 4200 it in two steps. First, replace any debug uses of 4201 reg, with its original mode temporarily restored, 4202 with this copy we have created; then, replace the 4203 copy with the SUBREG of the original shared reg, 4204 once again changed to the new mode. */ 4205 propagate_for_debug (first, last, reg, tempreg, 4206 this_basic_block); 4207 adjust_reg_mode (reg, new_mode); 4208 propagate_for_debug (first, last, tempreg, 4209 lowpart_subreg (old_mode, reg, new_mode), 4210 this_basic_block); 4211 } 4212 } 4213 } 4214 4215 /* If we will be able to accept this, we have made a 4216 change to the destination of I3. This requires us to 4217 do a few adjustments. */ 4218 4219 if (changed_i3_dest) 4220 { 4221 PATTERN (i3) = newpat; 4222 adjust_for_new_dest (i3); 4223 } 4224 4225 /* We now know that we can do this combination. Merge the insns and 4226 update the status of registers and LOG_LINKS. */ 4227 4228 if (undobuf.other_insn) 4229 { 4230 rtx note, next; 4231 4232 PATTERN (undobuf.other_insn) = other_pat; 4233 4234 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED, 4235 ensure that they are still valid. Then add any non-duplicate 4236 notes added by recog_for_combine. */ 4237 for (note = REG_NOTES (undobuf.other_insn); note; note = next) 4238 { 4239 next = XEXP (note, 1); 4240 4241 if ((REG_NOTE_KIND (note) == REG_DEAD 4242 && !reg_referenced_p (XEXP (note, 0), 4243 PATTERN (undobuf.other_insn))) 4244 ||(REG_NOTE_KIND (note) == REG_UNUSED 4245 && !reg_set_p (XEXP (note, 0), 4246 PATTERN (undobuf.other_insn))) 4247 /* Simply drop equal note since it may be no longer valid 4248 for other_insn. It may be possible to record that CC 4249 register is changed and only discard those notes, but 4250 in practice it's unnecessary complication and doesn't 4251 give any meaningful improvement. 4252 4253 See PR78559. */ 4254 || REG_NOTE_KIND (note) == REG_EQUAL 4255 || REG_NOTE_KIND (note) == REG_EQUIV) 4256 remove_note (undobuf.other_insn, note); 4257 } 4258 4259 distribute_notes (new_other_notes, undobuf.other_insn, 4260 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX, 4261 NULL_RTX); 4262 } 4263 4264 if (swap_i2i3) 4265 { 4266 /* I3 now uses what used to be its destination and which is now 4267 I2's destination. This requires us to do a few adjustments. */ 4268 PATTERN (i3) = newpat; 4269 adjust_for_new_dest (i3); 4270 } 4271 4272 if (swap_i2i3 || split_i2i3) 4273 { 4274 /* We might need a LOG_LINK from I3 to I2. But then we used to 4275 have one, so we still will. 4276 4277 However, some later insn might be using I2's dest and have 4278 a LOG_LINK pointing at I3. We should change it to point at 4279 I2 instead. */ 4280 4281 /* newi2pat is usually a SET here; however, recog_for_combine might 4282 have added some clobbers. 
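If so it has the shape (parallel [(set (reg D) ...) (clobber ...)]),
and the SET we need is element 0; D is an arbitrary destination shown
for illustration.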
*/
4283 rtx x = newi2pat;
4284 if (GET_CODE (x) == PARALLEL)
4285 x = XVECEXP (newi2pat, 0, 0);
4286
4287 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4288 unsigned int regno = reg_or_subregno (SET_DEST (x));
4289
4290 bool done = false;
4291 for (rtx_insn *insn = NEXT_INSN (i3);
4292 !done
4293 && insn
4294 && NONDEBUG_INSN_P (insn)
4295 && BLOCK_FOR_INSN (insn) == this_basic_block;
4296 insn = NEXT_INSN (insn))
4297 {
4298 struct insn_link *link;
4299 FOR_EACH_LOG_LINK (link, insn)
4300 if (link->insn == i3 && link->regno == regno)
4301 {
4302 link->insn = i2;
4303 done = true;
4304 break;
4305 }
4306 }
4307 }
4308
4309 {
4310 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4311 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4312 rtx midnotes = 0;
4313 int from_luid;
4314 /* Compute which registers we expect to eliminate. newi2pat may be setting
4315 either i3dest or i2dest, so we must check it. */
4316 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4317 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4318 || !i2dest_killed
4319 ? 0 : i2dest);
4320 /* For i1, we need to compute both local elimination and global
4321 elimination information with respect to newi2pat because i1dest
4322 may be the same as i3dest, in which case newi2pat may be setting
4323 i1dest. Global information is used when distributing REG_DEAD
4324 note for i2 and i3, in which case it does matter if newi2pat sets
4325 i1dest or not.
4326
4327 Local information is used when distributing REG_DEAD note for i1,
4328 in which case it doesn't matter if newi2pat sets i1dest or not.
4329 See PR62151; if we have a four-insn combination:
4330 i0: r0 <- i0src
4331 i1: r1 <- i1src (using r0)
4332 REG_DEAD (r0)
4333 i2: r0 <- i2src (using r1)
4334 i3: r3 <- i3src (using r0)
4335 ix: using r0
4336 From i1's point of view, r0 is eliminated, no matter if it is set
4337 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4338 should be discarded.
4339
4340 Note local information only affects cases in forms like "I1->I2->I3",
4341 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4342 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4343 i0dest anyway. */
4344 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4345 || !i1dest_killed
4346 ? 0 : i1dest);
4347 rtx elim_i1 = (local_elim_i1 == 0
4348 || (newi2pat && reg_set_p (i1dest, newi2pat))
4349 ? 0 : i1dest);
4350 /* Same case as i1. */
4351 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4352 ? 0 : i0dest);
4353 rtx elim_i0 = (local_elim_i0 == 0
4354 || (newi2pat && reg_set_p (i0dest, newi2pat))
4355 ? 0 : i0dest);
4356
4357 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4358 clear them. */
4359 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4360 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4361 if (i1)
4362 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4363 if (i0)
4364 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4365
4366 /* Ensure that we do not have something that should not be shared but
4367 occurs multiple times in the new insns. Check this by first
4368 resetting all the `used' flags and then copying anything that is shared.
*/ 4369 4370 reset_used_flags (i3notes); 4371 reset_used_flags (i2notes); 4372 reset_used_flags (i1notes); 4373 reset_used_flags (i0notes); 4374 reset_used_flags (newpat); 4375 reset_used_flags (newi2pat); 4376 if (undobuf.other_insn) 4377 reset_used_flags (PATTERN (undobuf.other_insn)); 4378 4379 i3notes = copy_rtx_if_shared (i3notes); 4380 i2notes = copy_rtx_if_shared (i2notes); 4381 i1notes = copy_rtx_if_shared (i1notes); 4382 i0notes = copy_rtx_if_shared (i0notes); 4383 newpat = copy_rtx_if_shared (newpat); 4384 newi2pat = copy_rtx_if_shared (newi2pat); 4385 if (undobuf.other_insn) 4386 reset_used_flags (PATTERN (undobuf.other_insn)); 4387 4388 INSN_CODE (i3) = insn_code_number; 4389 PATTERN (i3) = newpat; 4390 4391 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3)) 4392 { 4393 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link; 4394 link = XEXP (link, 1)) 4395 { 4396 if (substed_i2) 4397 { 4398 /* I2SRC must still be meaningful at this point. Some 4399 splitting operations can invalidate I2SRC, but those 4400 operations do not apply to calls. */ 4401 gcc_assert (i2src); 4402 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), 4403 i2dest, i2src); 4404 } 4405 if (substed_i1) 4406 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), 4407 i1dest, i1src); 4408 if (substed_i0) 4409 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), 4410 i0dest, i0src); 4411 } 4412 } 4413 4414 if (undobuf.other_insn) 4415 INSN_CODE (undobuf.other_insn) = other_code_number; 4416 4417 /* We had one special case above where I2 had more than one set and 4418 we replaced a destination of one of those sets with the destination 4419 of I3. In that case, we have to update LOG_LINKS of insns later 4420 in this basic block. Note that this (expensive) case is rare. 4421 4422 Also, in this case, we must pretend that all REG_NOTEs for I2 4423 actually came from I3, so that REG_UNUSED notes from I2 will be 4424 properly handled. */ 4425 4426 if (i3_subst_into_i2) 4427 { 4428 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++) 4429 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET 4430 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER) 4431 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) 4432 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest 4433 && ! 
find_reg_note (i2, REG_UNUSED, 4434 SET_DEST (XVECEXP (PATTERN (i2), 0, i)))) 4435 for (temp_insn = NEXT_INSN (i2); 4436 temp_insn 4437 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) 4438 || BB_HEAD (this_basic_block) != temp_insn); 4439 temp_insn = NEXT_INSN (temp_insn)) 4440 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn)) 4441 FOR_EACH_LOG_LINK (link, temp_insn) 4442 if (link->insn == i2) 4443 link->insn = i3; 4444 4445 if (i3notes) 4446 { 4447 rtx link = i3notes; 4448 while (XEXP (link, 1)) 4449 link = XEXP (link, 1); 4450 XEXP (link, 1) = i2notes; 4451 } 4452 else 4453 i3notes = i2notes; 4454 i2notes = 0; 4455 } 4456 4457 LOG_LINKS (i3) = NULL; 4458 REG_NOTES (i3) = 0; 4459 LOG_LINKS (i2) = NULL; 4460 REG_NOTES (i2) = 0; 4461 4462 if (newi2pat) 4463 { 4464 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch) 4465 propagate_for_debug (i2, last_combined_insn, i2dest, i2src, 4466 this_basic_block); 4467 INSN_CODE (i2) = i2_code_number; 4468 PATTERN (i2) = newi2pat; 4469 } 4470 else 4471 { 4472 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src) 4473 propagate_for_debug (i2, last_combined_insn, i2dest, i2src, 4474 this_basic_block); 4475 SET_INSN_DELETED (i2); 4476 } 4477 4478 if (i1) 4479 { 4480 LOG_LINKS (i1) = NULL; 4481 REG_NOTES (i1) = 0; 4482 if (MAY_HAVE_DEBUG_BIND_INSNS) 4483 propagate_for_debug (i1, last_combined_insn, i1dest, i1src, 4484 this_basic_block); 4485 SET_INSN_DELETED (i1); 4486 } 4487 4488 if (i0) 4489 { 4490 LOG_LINKS (i0) = NULL; 4491 REG_NOTES (i0) = 0; 4492 if (MAY_HAVE_DEBUG_BIND_INSNS) 4493 propagate_for_debug (i0, last_combined_insn, i0dest, i0src, 4494 this_basic_block); 4495 SET_INSN_DELETED (i0); 4496 } 4497 4498 /* Get death notes for everything that is now used in either I3 or 4499 I2 and used to die in a previous insn. If we built two new 4500 patterns, move from I1 to I2 then I2 to I3 so that we get the 4501 proper movement on registers that I2 modifies. */ 4502 4503 if (i0) 4504 from_luid = DF_INSN_LUID (i0); 4505 else if (i1) 4506 from_luid = DF_INSN_LUID (i1); 4507 else 4508 from_luid = DF_INSN_LUID (i2); 4509 if (newi2pat) 4510 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes); 4511 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes); 4512 4513 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */ 4514 if (i3notes) 4515 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL, 4516 elim_i2, elim_i1, elim_i0); 4517 if (i2notes) 4518 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL, 4519 elim_i2, elim_i1, elim_i0); 4520 if (i1notes) 4521 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL, 4522 elim_i2, local_elim_i1, local_elim_i0); 4523 if (i0notes) 4524 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL, 4525 elim_i2, elim_i1, local_elim_i0); 4526 if (midnotes) 4527 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL, 4528 elim_i2, elim_i1, elim_i0); 4529 4530 /* Distribute any notes added to I2 or I3 by recog_for_combine. We 4531 know these are REG_UNUSED and want them to go to the desired insn, 4532 so we always pass it as i3. */ 4533 4534 if (newi2pat && new_i2_notes) 4535 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX, 4536 NULL_RTX); 4537 4538 if (new_i3_notes) 4539 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX, 4540 NULL_RTX); 4541 4542 /* If I3DEST was used in I3SRC, it really died in I3. We may need to 4543 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets 4544 I3DEST, the death must be somewhere before I2, not I3. 
If we passed I3
4545 in that case, it might delete I2. Similarly for I2 and I1.
4546 Show an additional death due to the REG_DEAD note we make here. If
4547 we discard it in distribute_notes, we will decrement it again. */
4548
4549 if (i3dest_killed)
4550 {
4551 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4552 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4553 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4554 elim_i1, elim_i0);
4555 else
4556 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4557 elim_i2, elim_i1, elim_i0);
4558 }
4559
4560 if (i2dest_in_i2src)
4561 {
4562 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4563 if (newi2pat && reg_set_p (i2dest, newi2pat))
4564 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4565 NULL_RTX, NULL_RTX);
4566 else
4567 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4568 NULL_RTX, NULL_RTX, NULL_RTX);
4569 }
4570
4571 if (i1dest_in_i1src)
4572 {
4573 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4574 if (newi2pat && reg_set_p (i1dest, newi2pat))
4575 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4576 NULL_RTX, NULL_RTX);
4577 else
4578 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4579 NULL_RTX, NULL_RTX, NULL_RTX);
4580 }
4581
4582 if (i0dest_in_i0src)
4583 {
4584 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4585 if (newi2pat && reg_set_p (i0dest, newi2pat))
4586 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4587 NULL_RTX, NULL_RTX);
4588 else
4589 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4590 NULL_RTX, NULL_RTX, NULL_RTX);
4591 }
4592
4593 distribute_links (i3links);
4594 distribute_links (i2links);
4595 distribute_links (i1links);
4596 distribute_links (i0links);
4597
4598 if (REG_P (i2dest))
4599 {
4600 struct insn_link *link;
4601 rtx_insn *i2_insn = 0;
4602 rtx i2_val = 0, set;
4603
4604 /* The insn that used to set this register doesn't exist, and
4605 this life of the register may not exist either. See if one of
4606 I3's links points to an insn that sets I2DEST. If it does,
4607 that is now the last known value for I2DEST. If we don't update
4608 this and I2 set the register to a value that depended on its old
4609 contents, we will get confused. If this insn is used, things
4610 will be set correctly in combine_instructions. */
4611 FOR_EACH_LOG_LINK (link, i3)
4612 if ((set = single_set (link->insn)) != 0
4613 && rtx_equal_p (i2dest, SET_DEST (set)))
4614 i2_insn = link->insn, i2_val = SET_SRC (set);
4615
4616 record_value_for_reg (i2dest, i2_insn, i2_val);
4617
4618 /* If the reg formerly set in I2 died only once and that was in I3,
4619 zero its use count so it won't make `reload' do any work. */
4620 if (! added_sets_2
4621 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4622 && ! i2dest_in_i2src
4623 && REGNO (i2dest) < reg_n_sets_max)
4624 INC_REG_N_SETS (REGNO (i2dest), -1);
4625 }
4626
4627 if (i1 && REG_P (i1dest))
4628 {
4629 struct insn_link *link;
4630 rtx_insn *i1_insn = 0;
4631 rtx i1_val = 0, set;
4632
4633 FOR_EACH_LOG_LINK (link, i3)
4634 if ((set = single_set (link->insn)) != 0
4635 && rtx_equal_p (i1dest, SET_DEST (set)))
4636 i1_insn = link->insn, i1_val = SET_SRC (set);
4637
4638 record_value_for_reg (i1dest, i1_insn, i1_val);
4639
4640 if (! added_sets_1
4641 && !
i1dest_in_i1src 4642 && REGNO (i1dest) < reg_n_sets_max) 4643 INC_REG_N_SETS (REGNO (i1dest), -1); 4644 } 4645 4646 if (i0 && REG_P (i0dest)) 4647 { 4648 struct insn_link *link; 4649 rtx_insn *i0_insn = 0; 4650 rtx i0_val = 0, set; 4651 4652 FOR_EACH_LOG_LINK (link, i3) 4653 if ((set = single_set (link->insn)) != 0 4654 && rtx_equal_p (i0dest, SET_DEST (set))) 4655 i0_insn = link->insn, i0_val = SET_SRC (set); 4656 4657 record_value_for_reg (i0dest, i0_insn, i0_val); 4658 4659 if (! added_sets_0 4660 && ! i0dest_in_i0src 4661 && REGNO (i0dest) < reg_n_sets_max) 4662 INC_REG_N_SETS (REGNO (i0dest), -1); 4663 } 4664 4665 /* Update reg_stat[].nonzero_bits et al for any changes that may have 4666 been made to this insn. The order is important, because newi2pat 4667 can affect nonzero_bits of newpat. */ 4668 if (newi2pat) 4669 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL); 4670 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL); 4671 } 4672 4673 if (undobuf.other_insn != NULL_RTX) 4674 { 4675 if (dump_file) 4676 { 4677 fprintf (dump_file, "modifying other_insn "); 4678 dump_insn_slim (dump_file, undobuf.other_insn); 4679 } 4680 df_insn_rescan (undobuf.other_insn); 4681 } 4682 4683 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED))) 4684 { 4685 if (dump_file) 4686 { 4687 fprintf (dump_file, "modifying insn i0 "); 4688 dump_insn_slim (dump_file, i0); 4689 } 4690 df_insn_rescan (i0); 4691 } 4692 4693 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED))) 4694 { 4695 if (dump_file) 4696 { 4697 fprintf (dump_file, "modifying insn i1 "); 4698 dump_insn_slim (dump_file, i1); 4699 } 4700 df_insn_rescan (i1); 4701 } 4702 4703 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED))) 4704 { 4705 if (dump_file) 4706 { 4707 fprintf (dump_file, "modifying insn i2 "); 4708 dump_insn_slim (dump_file, i2); 4709 } 4710 df_insn_rescan (i2); 4711 } 4712 4713 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED))) 4714 { 4715 if (dump_file) 4716 { 4717 fprintf (dump_file, "modifying insn i3 "); 4718 dump_insn_slim (dump_file, i3); 4719 } 4720 df_insn_rescan (i3); 4721 } 4722 4723 /* Set new_direct_jump_p if a new return or simple jump instruction 4724 has been created. Adjust the CFG accordingly. */ 4725 if (returnjump_p (i3) || any_uncondjump_p (i3)) 4726 { 4727 *new_direct_jump_p = 1; 4728 mark_jump_label (PATTERN (i3), i3, 0); 4729 update_cfg_for_uncondjump (i3); 4730 } 4731 4732 if (undobuf.other_insn != NULL_RTX 4733 && (returnjump_p (undobuf.other_insn) 4734 || any_uncondjump_p (undobuf.other_insn))) 4735 { 4736 *new_direct_jump_p = 1; 4737 update_cfg_for_uncondjump (undobuf.other_insn); 4738 } 4739 4740 if (GET_CODE (PATTERN (i3)) == TRAP_IF 4741 && XEXP (PATTERN (i3), 0) == const1_rtx) 4742 { 4743 basic_block bb = BLOCK_FOR_INSN (i3); 4744 gcc_assert (bb); 4745 remove_edge (split_block (bb, i3)); 4746 emit_barrier_after_bb (bb); 4747 *new_direct_jump_p = 1; 4748 } 4749 4750 if (undobuf.other_insn 4751 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF 4752 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx) 4753 { 4754 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn); 4755 gcc_assert (bb); 4756 remove_edge (split_block (bb, undobuf.other_insn)); 4757 emit_barrier_after_bb (bb); 4758 *new_direct_jump_p = 1; 4759 } 4760 4761 /* A noop might also need cleaning up of CFG, if it comes from the 4762 simplification of a jump. 
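For instance, a conditional branch whose condition folded to a constant
can leave (set (pc) (pc)), which no longer jumps anywhere; the tests
below look for exactly that form.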
*/ 4763 if (JUMP_P (i3) 4764 && GET_CODE (newpat) == SET 4765 && SET_SRC (newpat) == pc_rtx 4766 && SET_DEST (newpat) == pc_rtx) 4767 { 4768 *new_direct_jump_p = 1; 4769 update_cfg_for_uncondjump (i3); 4770 } 4771 4772 if (undobuf.other_insn != NULL_RTX 4773 && JUMP_P (undobuf.other_insn) 4774 && GET_CODE (PATTERN (undobuf.other_insn)) == SET 4775 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx 4776 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx) 4777 { 4778 *new_direct_jump_p = 1; 4779 update_cfg_for_uncondjump (undobuf.other_insn); 4780 } 4781 4782 combine_successes++; 4783 undo_commit (); 4784 4785 rtx_insn *ret = newi2pat ? i2 : i3; 4786 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret)) 4787 ret = added_links_insn; 4788 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret)) 4789 ret = added_notes_insn; 4790 4791 return ret; 4792 } 4793 4794 /* Get a marker for undoing to the current state. */ 4795 4796 static void * 4797 get_undo_marker (void) 4798 { 4799 return undobuf.undos; 4800 } 4801 4802 /* Undo the modifications up to the marker. */ 4803 4804 static void 4805 undo_to_marker (void *marker) 4806 { 4807 struct undo *undo, *next; 4808 4809 for (undo = undobuf.undos; undo != marker; undo = next) 4810 { 4811 gcc_assert (undo); 4812 4813 next = undo->next; 4814 switch (undo->kind) 4815 { 4816 case UNDO_RTX: 4817 *undo->where.r = undo->old_contents.r; 4818 break; 4819 case UNDO_INT: 4820 *undo->where.i = undo->old_contents.i; 4821 break; 4822 case UNDO_MODE: 4823 adjust_reg_mode (*undo->where.r, undo->old_contents.m); 4824 break; 4825 case UNDO_LINKS: 4826 *undo->where.l = undo->old_contents.l; 4827 break; 4828 default: 4829 gcc_unreachable (); 4830 } 4831 4832 undo->next = undobuf.frees; 4833 undobuf.frees = undo; 4834 } 4835 4836 undobuf.undos = (struct undo *) marker; 4837 } 4838 4839 /* Undo all the modifications recorded in undobuf. */ 4840 4841 static void 4842 undo_all (void) 4843 { 4844 undo_to_marker (0); 4845 } 4846 4847 /* We've committed to accepting the changes we made. Move all 4848 of the undos to the free list. */ 4849 4850 static void 4851 undo_commit (void) 4852 { 4853 struct undo *undo, *next; 4854 4855 for (undo = undobuf.undos; undo; undo = next) 4856 { 4857 next = undo->next; 4858 undo->next = undobuf.frees; 4859 undobuf.frees = undo; 4860 } 4861 undobuf.undos = 0; 4862 } 4863 4864 /* Find the innermost point within the rtx at LOC, possibly LOC itself, 4865 where we have an arithmetic expression and return that point. LOC will 4866 be inside INSN. 4867 4868 try_combine will call this function to see if an insn can be split into 4869 two insns. */ 4870 4871 static rtx * 4872 find_split_point (rtx *loc, rtx_insn *insn, bool set_src) 4873 { 4874 rtx x = *loc; 4875 enum rtx_code code = GET_CODE (x); 4876 rtx *split; 4877 unsigned HOST_WIDE_INT len = 0; 4878 HOST_WIDE_INT pos = 0; 4879 int unsignedp = 0; 4880 rtx inner = NULL_RTX; 4881 scalar_int_mode mode, inner_mode; 4882 4883 /* First special-case some codes. */ 4884 switch (code) 4885 { 4886 case SUBREG: 4887 #ifdef INSN_SCHEDULING 4888 /* If we are making a paradoxical SUBREG invalid, it becomes a split 4889 point. */ 4890 if (MEM_P (SUBREG_REG (x))) 4891 return loc; 4892 #endif 4893 return find_split_point (&SUBREG_REG (x), insn, false); 4894 4895 case MEM: 4896 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it 4897 using LO_SUM and HIGH. 
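Schematically, (mem (symbol_ref X)) becomes
(mem (lo_sum (high (symbol_ref X)) (symbol_ref X))), and the HIGH
subexpression is returned as the split point.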
*/
4898 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4899 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4900 {
4901 machine_mode address_mode = get_address_mode (x);
4902
4903 SUBST (XEXP (x, 0),
4904 gen_rtx_LO_SUM (address_mode,
4905 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4906 XEXP (x, 0)));
4907 return &XEXP (XEXP (x, 0), 0);
4908 }
4909
4910 /* If we have a PLUS whose second operand is a constant and the
4911 address is not valid, perhaps we can split it up using
4912 the machine-specific way to split large constants. We use
4913 the first pseudo-reg (one of the virtual regs) as a placeholder;
4914 it will not remain in the result. */
4915 if (GET_CODE (XEXP (x, 0)) == PLUS
4916 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4917 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4918 MEM_ADDR_SPACE (x)))
4919 {
4920 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4921 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4922 subst_insn);
4923
4924 /* This should have produced two insns, each of which sets our
4925 placeholder. If the source of the second is a valid address,
4926 we can put both sources together and make a split point
4927 in the middle. */
4928
4929 if (seq
4930 && NEXT_INSN (seq) != NULL_RTX
4931 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4932 && NONJUMP_INSN_P (seq)
4933 && GET_CODE (PATTERN (seq)) == SET
4934 && SET_DEST (PATTERN (seq)) == reg
4935 && ! reg_mentioned_p (reg,
4936 SET_SRC (PATTERN (seq)))
4937 && NONJUMP_INSN_P (NEXT_INSN (seq))
4938 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4939 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4940 && memory_address_addr_space_p
4941 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4942 MEM_ADDR_SPACE (x)))
4943 {
4944 rtx src1 = SET_SRC (PATTERN (seq));
4945 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4946
4947 /* Replace the placeholder in SRC2 with SRC1. If we can
4948 find where in SRC2 it was placed, that can become our
4949 split point and we can replace this address with SRC2.
4950 Just try two obvious places. */
4951
4952 src2 = replace_rtx (src2, reg, src1);
4953 split = 0;
4954 if (XEXP (src2, 0) == src1)
4955 split = &XEXP (src2, 0);
4956 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4957 && XEXP (XEXP (src2, 0), 0) == src1)
4958 split = &XEXP (XEXP (src2, 0), 0);
4959
4960 if (split)
4961 {
4962 SUBST (XEXP (x, 0), src2);
4963 return split;
4964 }
4965 }
4966
4967 /* If that didn't work, perhaps the first operand is complex and
4968 needs to be computed separately, so make a split point there.
4969 This will occur on machines that just support REG + CONST
4970 and have a constant moved through some previous computation. */
4971
4972 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4973 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4974 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4975 return &XEXP (XEXP (x, 0), 0);
4976 }
4977
4978 /* If we have a PLUS whose first operand is complex, try computing it
4979 separately by making a split there. */
4980 if (GET_CODE (XEXP (x, 0)) == PLUS
4981 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4982 MEM_ADDR_SPACE (x))
4983 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4984 && !
(GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG 4985 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0))))) 4986 return &XEXP (XEXP (x, 0), 0); 4987 break; 4988 4989 case SET: 4990 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a 4991 ZERO_EXTRACT, the most likely reason why this doesn't match is that 4992 we need to put the operand into a register. So split at that 4993 point. */ 4994 4995 if (SET_DEST (x) == cc0_rtx 4996 && GET_CODE (SET_SRC (x)) != COMPARE 4997 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT 4998 && !OBJECT_P (SET_SRC (x)) 4999 && ! (GET_CODE (SET_SRC (x)) == SUBREG 5000 && OBJECT_P (SUBREG_REG (SET_SRC (x))))) 5001 return &SET_SRC (x); 5002 5003 /* See if we can split SET_SRC as it stands. */ 5004 split = find_split_point (&SET_SRC (x), insn, true); 5005 if (split && split != &SET_SRC (x)) 5006 return split; 5007 5008 /* See if we can split SET_DEST as it stands. */ 5009 split = find_split_point (&SET_DEST (x), insn, false); 5010 if (split && split != &SET_DEST (x)) 5011 return split; 5012 5013 /* See if this is a bitfield assignment with everything constant. If 5014 so, this is an IOR of an AND, so split it into that. */ 5015 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT 5016 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)), 5017 &inner_mode) 5018 && HWI_COMPUTABLE_MODE_P (inner_mode) 5019 && CONST_INT_P (XEXP (SET_DEST (x), 1)) 5020 && CONST_INT_P (XEXP (SET_DEST (x), 2)) 5021 && CONST_INT_P (SET_SRC (x)) 5022 && ((INTVAL (XEXP (SET_DEST (x), 1)) 5023 + INTVAL (XEXP (SET_DEST (x), 2))) 5024 <= GET_MODE_PRECISION (inner_mode)) 5025 && ! side_effects_p (XEXP (SET_DEST (x), 0))) 5026 { 5027 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2)); 5028 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1)); 5029 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)); 5030 rtx dest = XEXP (SET_DEST (x), 0); 5031 unsigned HOST_WIDE_INT mask 5032 = (HOST_WIDE_INT_1U << len) - 1; 5033 rtx or_mask; 5034 5035 if (BITS_BIG_ENDIAN) 5036 pos = GET_MODE_PRECISION (inner_mode) - len - pos; 5037 5038 or_mask = gen_int_mode (src << pos, inner_mode); 5039 if (src == mask) 5040 SUBST (SET_SRC (x), 5041 simplify_gen_binary (IOR, inner_mode, dest, or_mask)); 5042 else 5043 { 5044 rtx negmask = gen_int_mode (~(mask << pos), inner_mode); 5045 SUBST (SET_SRC (x), 5046 simplify_gen_binary (IOR, inner_mode, 5047 simplify_gen_binary (AND, inner_mode, 5048 dest, negmask), 5049 or_mask)); 5050 } 5051 5052 SUBST (SET_DEST (x), dest); 5053 5054 split = find_split_point (&SET_SRC (x), insn, true); 5055 if (split && split != &SET_SRC (x)) 5056 return split; 5057 } 5058 5059 /* Otherwise, see if this is an operation that we can split into two. 5060 If so, try to split that. */ 5061 code = GET_CODE (SET_SRC (x)); 5062 5063 switch (code) 5064 { 5065 case AND: 5066 /* If we are AND'ing with a large constant that is only a single 5067 bit and the result is only being used in a context where we 5068 need to know if it is zero or nonzero, replace it with a bit 5069 extraction. This will avoid the large constant, which might 5070 have taken more than one insn to make. If the constant were 5071 not a valid argument to the AND but took only one insn to make, 5072 this is no worse, but if it took more than one insn, it will 5073 be better. 
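Schematically, (and:SI (reg A) (const_int 0x100000)) whose result is
only tested against zero can become a one-bit ZERO_EXTRACT of bit 20
of (reg A); the constant and position are illustrative.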
*/ 5074 5075 if (CONST_INT_P (XEXP (SET_SRC (x), 1)) 5076 && REG_P (XEXP (SET_SRC (x), 0)) 5077 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7 5078 && REG_P (SET_DEST (x)) 5079 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0 5080 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE) 5081 && XEXP (*split, 0) == SET_DEST (x) 5082 && XEXP (*split, 1) == const0_rtx) 5083 { 5084 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)), 5085 XEXP (SET_SRC (x), 0), 5086 pos, NULL_RTX, 1, 1, 0, 0); 5087 if (extraction != 0) 5088 { 5089 SUBST (SET_SRC (x), extraction); 5090 return find_split_point (loc, insn, false); 5091 } 5092 } 5093 break; 5094 5095 case NE: 5096 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X 5097 is known to be on, this can be converted into a NEG of a shift. */ 5098 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx 5099 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0)) 5100 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0), 5101 GET_MODE (XEXP (SET_SRC (x), 5102 0))))) >= 1)) 5103 { 5104 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0)); 5105 rtx pos_rtx = gen_int_shift_amount (mode, pos); 5106 SUBST (SET_SRC (x), 5107 gen_rtx_NEG (mode, 5108 gen_rtx_LSHIFTRT (mode, 5109 XEXP (SET_SRC (x), 0), 5110 pos_rtx))); 5111 5112 split = find_split_point (&SET_SRC (x), insn, true); 5113 if (split && split != &SET_SRC (x)) 5114 return split; 5115 } 5116 break; 5117 5118 case SIGN_EXTEND: 5119 inner = XEXP (SET_SRC (x), 0); 5120 5121 /* We can't optimize if either mode is a partial integer 5122 mode as we don't know how many bits are significant 5123 in those modes. */ 5124 if (!is_int_mode (GET_MODE (inner), &inner_mode) 5125 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT) 5126 break; 5127 5128 pos = 0; 5129 len = GET_MODE_PRECISION (inner_mode); 5130 unsignedp = 0; 5131 break; 5132 5133 case SIGN_EXTRACT: 5134 case ZERO_EXTRACT: 5135 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)), 5136 &inner_mode) 5137 && CONST_INT_P (XEXP (SET_SRC (x), 1)) 5138 && CONST_INT_P (XEXP (SET_SRC (x), 2))) 5139 { 5140 inner = XEXP (SET_SRC (x), 0); 5141 len = INTVAL (XEXP (SET_SRC (x), 1)); 5142 pos = INTVAL (XEXP (SET_SRC (x), 2)); 5143 5144 if (BITS_BIG_ENDIAN) 5145 pos = GET_MODE_PRECISION (inner_mode) - len - pos; 5146 unsignedp = (code == ZERO_EXTRACT); 5147 } 5148 break; 5149 5150 default: 5151 break; 5152 } 5153 5154 if (len 5155 && known_subrange_p (pos, len, 5156 0, GET_MODE_PRECISION (GET_MODE (inner))) 5157 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode)) 5158 { 5159 /* For unsigned, we have a choice of a shift followed by an 5160 AND or two shifts. Use two shifts for field sizes where the 5161 constant might be too large. We assume here that we can 5162 always at least get 8-bit constants in an AND insn, which is 5163 true for every current RISC. 
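For example, on a 32-bit target: an unsigned 6-bit field at bit 4 of X becomes (and (lshiftrt X (const_int 4)) (const_int 63)), while an unsigned 20-bit field at bit 4 becomes (lshiftrt (ashift X (const_int 8)) (const_int 12)) so that no large mask constant is needed.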
*/ 5164 5165 if (unsignedp && len <= 8) 5166 { 5167 unsigned HOST_WIDE_INT mask 5168 = (HOST_WIDE_INT_1U << len) - 1; 5169 rtx pos_rtx = gen_int_shift_amount (mode, pos); 5170 SUBST (SET_SRC (x), 5171 gen_rtx_AND (mode, 5172 gen_rtx_LSHIFTRT 5173 (mode, gen_lowpart (mode, inner), pos_rtx), 5174 gen_int_mode (mask, mode))); 5175 5176 split = find_split_point (&SET_SRC (x), insn, true); 5177 if (split && split != &SET_SRC (x)) 5178 return split; 5179 } 5180 else 5181 { 5182 int left_bits = GET_MODE_PRECISION (mode) - len - pos; 5183 int right_bits = GET_MODE_PRECISION (mode) - len; 5184 SUBST (SET_SRC (x), 5185 gen_rtx_fmt_ee 5186 (unsignedp ? LSHIFTRT : ASHIFTRT, mode, 5187 gen_rtx_ASHIFT (mode, 5188 gen_lowpart (mode, inner), 5189 gen_int_shift_amount (mode, left_bits)), 5190 gen_int_shift_amount (mode, right_bits))); 5191 5192 split = find_split_point (&SET_SRC (x), insn, true); 5193 if (split && split != &SET_SRC (x)) 5194 return split; 5195 } 5196 } 5197 5198 /* See if this is a simple operation with a constant as the second 5199 operand. It might be that this constant is out of range and hence 5200 could be used as a split point. */ 5201 if (BINARY_P (SET_SRC (x)) 5202 && CONSTANT_P (XEXP (SET_SRC (x), 1)) 5203 && (OBJECT_P (XEXP (SET_SRC (x), 0)) 5204 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG 5205 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0)))))) 5206 return &XEXP (SET_SRC (x), 1); 5207 5208 /* Finally, see if this is a simple operation with its first operand 5209 not in a register. The operation might require this operand in a 5210 register, so return it as a split point. We can always do this 5211 because if the first operand were another operation, we would have 5212 already found it as a split point. */ 5213 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x))) 5214 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode)) 5215 return &XEXP (SET_SRC (x), 0); 5216 5217 return 0; 5218 5219 case AND: 5220 case IOR: 5221 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR, 5222 it is better to write this as (not (ior A B)) so we can split it. 5223 Similarly for IOR. */ 5224 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT) 5225 { 5226 SUBST (*loc, 5227 gen_rtx_NOT (GET_MODE (x), 5228 gen_rtx_fmt_ee (code == IOR ? AND : IOR, 5229 GET_MODE (x), 5230 XEXP (XEXP (x, 0), 0), 5231 XEXP (XEXP (x, 1), 0)))); 5232 return find_split_point (loc, insn, set_src); 5233 } 5234 5235 /* Many RISC machines have a large set of logical insns. If the 5236 second operand is a NOT, put it first so we will try to split the 5237 other operand first. */ 5238 if (GET_CODE (XEXP (x, 1)) == NOT) 5239 { 5240 rtx tem = XEXP (x, 0); 5241 SUBST (XEXP (x, 0), XEXP (x, 1)); 5242 SUBST (XEXP (x, 1), tem); 5243 } 5244 break; 5245 5246 case PLUS: 5247 case MINUS: 5248 /* Canonicalization can produce (minus A (mult B C)), where C is a 5249 constant. It may be better to try splitting (plus (mult B -C) A) 5250 instead if this isn't a multiply by a power of two. 
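For example, (minus A (mult B (const_int 3))) would be retried as (plus (mult B (const_int -3)) A), which may match a multiply-add pattern that only accepts PLUS.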
*/ 5251 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT 5252 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT 5253 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1)))) 5254 { 5255 machine_mode mode = GET_MODE (x); 5256 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1)); 5257 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode); 5258 SUBST (*loc, gen_rtx_PLUS (mode, 5259 gen_rtx_MULT (mode, 5260 XEXP (XEXP (x, 1), 0), 5261 gen_int_mode (other_int, 5262 mode)), 5263 XEXP (x, 0))); 5264 return find_split_point (loc, insn, set_src); 5265 } 5266 5267 /* Split at a multiply-accumulate instruction. However if this is 5268 the SET_SRC, we likely do not have such an instruction and it's 5269 worthless to try this split. */ 5270 if (!set_src 5271 && (GET_CODE (XEXP (x, 0)) == MULT 5272 || (GET_CODE (XEXP (x, 0)) == ASHIFT 5273 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))) 5274 return loc; 5275 5276 default: 5277 break; 5278 } 5279 5280 /* Otherwise, select our actions depending on our rtx class. */ 5281 switch (GET_RTX_CLASS (code)) 5282 { 5283 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */ 5284 case RTX_TERNARY: 5285 split = find_split_point (&XEXP (x, 2), insn, false); 5286 if (split) 5287 return split; 5288 /* fall through */ 5289 case RTX_BIN_ARITH: 5290 case RTX_COMM_ARITH: 5291 case RTX_COMPARE: 5292 case RTX_COMM_COMPARE: 5293 split = find_split_point (&XEXP (x, 1), insn, false); 5294 if (split) 5295 return split; 5296 /* fall through */ 5297 case RTX_UNARY: 5298 /* Some machines have (and (shift ...) ...) insns. If X is not 5299 an AND, but XEXP (X, 0) is, use it as our split point. */ 5300 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND) 5301 return &XEXP (x, 0); 5302 5303 split = find_split_point (&XEXP (x, 0), insn, false); 5304 if (split) 5305 return split; 5306 return loc; 5307 5308 default: 5309 /* Otherwise, we don't have a split point. */ 5310 return 0; 5311 } 5312 } 5313 5314 /* Throughout X, replace FROM with TO, and return the result. 5315 The result is TO if X is FROM; 5316 otherwise the result is X, but its contents may have been modified. 5317 If they were modified, a record was made in undobuf so that 5318 undo_all will (among other things) return X to its original state. 5319 5320 If the number of changes necessary is too much to record to undo, 5321 the excess changes are not made, so the result is invalid. 5322 The changes already made can still be undone. 5323 undobuf.num_undo is incremented for such changes, so by testing that 5324 the caller can tell whether the result is valid. 5325 5326 `n_occurrences' is incremented each time FROM is replaced. 5327 5328 IN_DEST is nonzero if we are processing the SET_DEST of a SET. 5329 5330 IN_COND is nonzero if we are at the top level of a condition. 5331 5332 UNIQUE_COPY is nonzero if each substitution must be unique. We do this 5333 by copying if `n_occurrences' is nonzero. */ 5334 5335 static rtx 5336 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy) 5337 { 5338 enum rtx_code code = GET_CODE (x); 5339 machine_mode op0_mode = VOIDmode; 5340 const char *fmt; 5341 int len, i; 5342 rtx new_rtx; 5343 5344 /* Two expressions are equal if they are identical copies of a shared 5345 RTX or if they are both registers with the same register number 5346 and mode. 
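Thus two distinct (reg:SI 42) rtxes compare equal here, while (reg:SI 42) and (reg:HI 42) do not, even though they name the same register.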
*/ 5347 5348 #define COMBINE_RTX_EQUAL_P(X,Y) \ 5349 ((X) == (Y) \ 5350 || (REG_P (X) && REG_P (Y) \ 5351 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y))) 5352 5353 /* Do not substitute into clobbers of regs -- this will never result in 5354 valid RTL. */ 5355 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0))) 5356 return x; 5357 5358 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from)) 5359 { 5360 n_occurrences++; 5361 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to); 5362 } 5363 5364 /* If X and FROM are the same register but different modes, they 5365 will not have been seen as equal above. However, the log links code 5366 will make a LOG_LINKS entry for that case. If we do nothing, we 5367 will try to rerecognize our original insn and, when it succeeds, 5368 we will delete the feeding insn, which is incorrect. 5369 5370 So force this insn not to match in this (rare) case. */ 5371 if (! in_dest && code == REG && REG_P (from) 5372 && reg_overlap_mentioned_p (x, from)) 5373 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); 5374 5375 /* If this is an object, we are done unless it is a MEM or LO_SUM, both 5376 of which may contain things that can be combined. */ 5377 if (code != MEM && code != LO_SUM && OBJECT_P (x)) 5378 return x; 5379 5380 /* It is possible to have a subexpression appear twice in the insn. 5381 Suppose that FROM is a register that appears within TO. 5382 Then, after that subexpression has been scanned once by `subst', 5383 the second time it is scanned, TO may be found. If we were 5384 to scan TO here, we would find FROM within it and create a 5385 self-referent rtl structure which is completely wrong. */ 5386 if (COMBINE_RTX_EQUAL_P (x, to)) 5387 return to; 5388 5389 /* Parallel asm_operands need special attention because all of the 5390 inputs are shared across the arms. Furthermore, unsharing the 5391 rtl results in recognition failures. Failure to handle this case 5392 specially can result in circular rtl. 5393 5394 Solve this by doing a normal pass across the first entry of the 5395 parallel, and only processing the SET_DESTs of the subsequent 5396 entries. Ug. */ 5397 5398 if (code == PARALLEL 5399 && GET_CODE (XVECEXP (x, 0, 0)) == SET 5400 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS) 5401 { 5402 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy); 5403 5404 /* If this substitution failed, this whole thing fails. */ 5405 if (GET_CODE (new_rtx) == CLOBBER 5406 && XEXP (new_rtx, 0) == const0_rtx) 5407 return new_rtx; 5408 5409 SUBST (XVECEXP (x, 0, 0), new_rtx); 5410 5411 for (i = XVECLEN (x, 0) - 1; i >= 1; i--) 5412 { 5413 rtx dest = SET_DEST (XVECEXP (x, 0, i)); 5414 5415 if (!REG_P (dest) 5416 && GET_CODE (dest) != CC0 5417 && GET_CODE (dest) != PC) 5418 { 5419 new_rtx = subst (dest, from, to, 0, 0, unique_copy); 5420 5421 /* If this substitution failed, this whole thing fails. */ 5422 if (GET_CODE (new_rtx) == CLOBBER 5423 && XEXP (new_rtx, 0) == const0_rtx) 5424 return new_rtx; 5425 5426 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx); 5427 } 5428 } 5429 } 5430 else 5431 { 5432 len = GET_RTX_LENGTH (code); 5433 fmt = GET_RTX_FORMAT (code); 5434 5435 /* We don't need to process a SET_DEST that is a register, CC0, 5436 or PC, so set up to skip this common case. All other cases 5437 where we want to suppress replacing something inside a 5438 SET_SRC are handled via the IN_DEST operand. 
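(Pretending that the format of a SET is "ie" makes the loop below treat operand 0, the destination, as a plain integer field and skip it, so only the SET_SRC is scanned.)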
*/ 5439 if (code == SET 5440 && (REG_P (SET_DEST (x)) 5441 || GET_CODE (SET_DEST (x)) == CC0 5442 || GET_CODE (SET_DEST (x)) == PC)) 5443 fmt = "ie"; 5444 5445 /* Trying to simplify the operands of a widening MULT is not likely 5446 to create RTL matching a machine insn. */ 5447 if (code == MULT 5448 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND 5449 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) 5450 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND 5451 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND) 5452 && REG_P (XEXP (XEXP (x, 0), 0)) 5453 && REG_P (XEXP (XEXP (x, 1), 0)) 5454 && from == to) 5455 return x; 5456 5457 5458 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a 5459 constant. */ 5460 if (fmt[0] == 'e') 5461 op0_mode = GET_MODE (XEXP (x, 0)); 5462 5463 for (i = 0; i < len; i++) 5464 { 5465 if (fmt[i] == 'E') 5466 { 5467 int j; 5468 for (j = XVECLEN (x, i) - 1; j >= 0; j--) 5469 { 5470 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from)) 5471 { 5472 new_rtx = (unique_copy && n_occurrences 5473 ? copy_rtx (to) : to); 5474 n_occurrences++; 5475 } 5476 else 5477 { 5478 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0, 5479 unique_copy); 5480 5481 /* If this substitution failed, this whole thing 5482 fails. */ 5483 if (GET_CODE (new_rtx) == CLOBBER 5484 && XEXP (new_rtx, 0) == const0_rtx) 5485 return new_rtx; 5486 } 5487 5488 SUBST (XVECEXP (x, i, j), new_rtx); 5489 } 5490 } 5491 else if (fmt[i] == 'e') 5492 { 5493 /* If this is a register being set, ignore it. */ 5494 new_rtx = XEXP (x, i); 5495 if (in_dest 5496 && i == 0 5497 && (((code == SUBREG || code == ZERO_EXTRACT) 5498 && REG_P (new_rtx)) 5499 || code == STRICT_LOW_PART)) 5500 ; 5501 5502 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from)) 5503 { 5504 /* In general, don't install a subreg involving two 5505 modes not tieable. It can worsen register 5506 allocation, and can even make invalid reload 5507 insns, since the reg inside may need to be copied 5508 from in the outside mode, and that may be invalid 5509 if it is an fp reg copied in integer mode. 5510 5511 We allow two exceptions to this: It is valid if 5512 it is inside another SUBREG and the mode of that 5513 SUBREG and the mode of the inside of TO is 5514 tieable and it is valid if X is a SET that copies 5515 FROM to CC0. */ 5516 5517 if (GET_CODE (to) == SUBREG 5518 && !targetm.modes_tieable_p (GET_MODE (to), 5519 GET_MODE (SUBREG_REG (to))) 5520 && ! (code == SUBREG 5521 && (targetm.modes_tieable_p 5522 (GET_MODE (x), GET_MODE (SUBREG_REG (to))))) 5523 && (!HAVE_cc0 5524 || (! (code == SET 5525 && i == 1 5526 && XEXP (x, 0) == cc0_rtx)))) 5527 return gen_rtx_CLOBBER (VOIDmode, const0_rtx); 5528 5529 if (code == SUBREG 5530 && REG_P (to) 5531 && REGNO (to) < FIRST_PSEUDO_REGISTER 5532 && simplify_subreg_regno (REGNO (to), GET_MODE (to), 5533 SUBREG_BYTE (x), 5534 GET_MODE (x)) < 0) 5535 return gen_rtx_CLOBBER (VOIDmode, const0_rtx); 5536 5537 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to); 5538 n_occurrences++; 5539 } 5540 else 5541 /* If we are in a SET_DEST, suppress most cases unless we 5542 have gone inside a MEM, in which case we want to 5543 simplify the address. We assume here that things that 5544 are actually part of the destination have their inner 5545 parts in the first expression. This is true for SUBREG, 5546 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only 5547 things aside from REG and MEM that should appear in a 5548 SET_DEST. 
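For example, in (set (mem (plus R C)) V) the recursion clears IN_DEST once it steps inside the MEM, so the address (plus R C) is substituted and simplified normally, whereas in (set (strict_low_part (reg X)) V) the register itself is left alone.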
*/ 5549 new_rtx = subst (XEXP (x, i), from, to, 5550 (((in_dest 5551 && (code == SUBREG || code == STRICT_LOW_PART 5552 || code == ZERO_EXTRACT)) 5553 || code == SET) 5554 && i == 0), 5555 code == IF_THEN_ELSE && i == 0, 5556 unique_copy); 5557 5558 /* If we found that we will have to reject this combination, 5559 indicate that by returning the CLOBBER ourselves, rather than 5560 an expression containing it. This will speed things up as 5561 well as prevent accidents where two CLOBBERs are considered 5562 to be equal, thus producing an incorrect simplification. */ 5563 5564 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx) 5565 return new_rtx; 5566 5567 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx)) 5568 { 5569 machine_mode mode = GET_MODE (x); 5570 5571 x = simplify_subreg (GET_MODE (x), new_rtx, 5572 GET_MODE (SUBREG_REG (x)), 5573 SUBREG_BYTE (x)); 5574 if (! x) 5575 x = gen_rtx_CLOBBER (mode, const0_rtx); 5576 } 5577 else if (CONST_SCALAR_INT_P (new_rtx) 5578 && (GET_CODE (x) == ZERO_EXTEND 5579 || GET_CODE (x) == FLOAT 5580 || GET_CODE (x) == UNSIGNED_FLOAT)) 5581 { 5582 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x), 5583 new_rtx, 5584 GET_MODE (XEXP (x, 0))); 5585 if (!x) 5586 return gen_rtx_CLOBBER (VOIDmode, const0_rtx); 5587 } 5588 else 5589 SUBST (XEXP (x, i), new_rtx); 5590 } 5591 } 5592 } 5593 5594 /* Check if we are loading something from the constant pool via float 5595 extension; in this case we would undo the compress_float_constant 5596 optimization and degenerate the constant load to an immediate value. */ 5597 if (GET_CODE (x) == FLOAT_EXTEND 5598 && MEM_P (XEXP (x, 0)) 5599 && MEM_READONLY_P (XEXP (x, 0))) 5600 { 5601 rtx tmp = avoid_constant_pool_reference (x); 5602 if (x != tmp) 5603 return x; 5604 } 5605 5606 /* Try to simplify X. If the simplification changed the code, it is likely 5607 that further simplification will help, so loop, but limit the number 5608 of repetitions that will be performed. */ 5609 5610 for (i = 0; i < 4; i++) 5611 { 5612 /* If X is sufficiently simple, don't bother trying to do anything 5613 with it. */ 5614 if (code != CONST_INT && code != REG && code != CLOBBER) 5615 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond); 5616 5617 if (GET_CODE (x) == code) 5618 break; 5619 5620 code = GET_CODE (x); 5621 5622 /* We no longer know the original mode of operand 0 since we 5623 have changed the form of X. */ 5624 op0_mode = VOIDmode; 5625 } 5626 5627 return x; 5628 } 5629 5630 /* If X is a commutative operation whose operands are not in the canonical 5631 order, use substitutions to swap them. */ 5632 5633 static void 5634 maybe_swap_commutative_operands (rtx x) 5635 { 5636 if (COMMUTATIVE_ARITH_P (x) 5637 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) 5638 { 5639 rtx temp = XEXP (x, 0); 5640 SUBST (XEXP (x, 0), XEXP (x, 1)); 5641 SUBST (XEXP (x, 1), temp); 5642 } 5643 } 5644 5645 /* Simplify X, a piece of RTL. We just operate on the expression at the 5646 outer level; call `subst' to simplify recursively. Return the new 5647 expression. 5648 5649 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero 5650 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level 5651 of a condition.
*/ 5652 5653 static rtx 5654 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, 5655 int in_cond) 5656 { 5657 enum rtx_code code = GET_CODE (x); 5658 machine_mode mode = GET_MODE (x); 5659 scalar_int_mode int_mode; 5660 rtx temp; 5661 int i; 5662 5663 /* If this is a commutative operation, put a constant last and a complex 5664 expression first. We don't need to do this for comparisons here. */ 5665 maybe_swap_commutative_operands (x); 5666 5667 /* Try to fold this expression in case we have constants that weren't 5668 present before. */ 5669 temp = 0; 5670 switch (GET_RTX_CLASS (code)) 5671 { 5672 case RTX_UNARY: 5673 if (op0_mode == VOIDmode) 5674 op0_mode = GET_MODE (XEXP (x, 0)); 5675 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode); 5676 break; 5677 case RTX_COMPARE: 5678 case RTX_COMM_COMPARE: 5679 { 5680 machine_mode cmp_mode = GET_MODE (XEXP (x, 0)); 5681 if (cmp_mode == VOIDmode) 5682 { 5683 cmp_mode = GET_MODE (XEXP (x, 1)); 5684 if (cmp_mode == VOIDmode) 5685 cmp_mode = op0_mode; 5686 } 5687 temp = simplify_relational_operation (code, mode, cmp_mode, 5688 XEXP (x, 0), XEXP (x, 1)); 5689 } 5690 break; 5691 case RTX_COMM_ARITH: 5692 case RTX_BIN_ARITH: 5693 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1)); 5694 break; 5695 case RTX_BITFIELD_OPS: 5696 case RTX_TERNARY: 5697 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0), 5698 XEXP (x, 1), XEXP (x, 2)); 5699 break; 5700 default: 5701 break; 5702 } 5703 5704 if (temp) 5705 { 5706 x = temp; 5707 code = GET_CODE (temp); 5708 op0_mode = VOIDmode; 5709 mode = GET_MODE (temp); 5710 } 5711 5712 /* If this is a simple operation applied to an IF_THEN_ELSE, try 5713 applying it to the arms of the IF_THEN_ELSE. This often simplifies 5714 things. Check for cases where both arms are testing the same 5715 condition. 5716 5717 Don't do anything if all operands are very simple. */ 5718 5719 if ((BINARY_P (x) 5720 && ((!OBJECT_P (XEXP (x, 0)) 5721 && ! (GET_CODE (XEXP (x, 0)) == SUBREG 5722 && OBJECT_P (SUBREG_REG (XEXP (x, 0))))) 5723 || (!OBJECT_P (XEXP (x, 1)) 5724 && ! (GET_CODE (XEXP (x, 1)) == SUBREG 5725 && OBJECT_P (SUBREG_REG (XEXP (x, 1))))))) 5726 || (UNARY_P (x) 5727 && (!OBJECT_P (XEXP (x, 0)) 5728 && ! (GET_CODE (XEXP (x, 0)) == SUBREG 5729 && OBJECT_P (SUBREG_REG (XEXP (x, 0))))))) 5730 { 5731 rtx cond, true_rtx, false_rtx; 5732 5733 cond = if_then_else_cond (x, &true_rtx, &false_rtx); 5734 if (cond != 0 5735 /* If everything is a comparison, what we have is highly unlikely 5736 to be simpler, so don't use it. */ 5737 && ! (COMPARISON_P (x) 5738 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))) 5739 /* Similarly, if we end up with one of the expressions the same 5740 as the original, it is certainly not simpler. */ 5741 && ! rtx_equal_p (x, true_rtx) 5742 && ! rtx_equal_p (x, false_rtx)) 5743 { 5744 rtx cop1 = const0_rtx; 5745 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1); 5746 5747 if (cond_code == NE && COMPARISON_P (cond)) 5748 return x; 5749 5750 /* Simplify the alternative arms; this may collapse the true and 5751 false arms to store-flag values. Be careful to use copy_rtx 5752 here since true_rtx or false_rtx might share RTL with x as a 5753 result of the if_then_else_cond call above. 
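(Substituting pc_rtx for pc_rtx changes nothing textually; it is done only to run the simplifier over each arm.)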
*/ 5754 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0); 5755 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0); 5756 5757 /* If true_rtx and false_rtx are not general_operands, an if_then_else 5758 is unlikely to be simpler. */ 5759 if (general_operand (true_rtx, VOIDmode) 5760 && general_operand (false_rtx, VOIDmode)) 5761 { 5762 enum rtx_code reversed; 5763 5764 /* Restarting if we generate a store-flag expression will cause 5765 us to loop. Just drop through in this case. */ 5766 5767 /* If the result values are STORE_FLAG_VALUE and zero, we can 5768 just make the comparison operation. */ 5769 if (true_rtx == const_true_rtx && false_rtx == const0_rtx) 5770 x = simplify_gen_relational (cond_code, mode, VOIDmode, 5771 cond, cop1); 5772 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx 5773 && ((reversed = reversed_comparison_code_parts 5774 (cond_code, cond, cop1, NULL)) 5775 != UNKNOWN)) 5776 x = simplify_gen_relational (reversed, mode, VOIDmode, 5777 cond, cop1); 5778 5779 /* Likewise, we can make the negate of a comparison operation 5780 if the result values are - STORE_FLAG_VALUE and zero. */ 5781 else if (CONST_INT_P (true_rtx) 5782 && INTVAL (true_rtx) == - STORE_FLAG_VALUE 5783 && false_rtx == const0_rtx) 5784 x = simplify_gen_unary (NEG, mode, 5785 simplify_gen_relational (cond_code, 5786 mode, VOIDmode, 5787 cond, cop1), 5788 mode); 5789 else if (CONST_INT_P (false_rtx) 5790 && INTVAL (false_rtx) == - STORE_FLAG_VALUE 5791 && true_rtx == const0_rtx 5792 && ((reversed = reversed_comparison_code_parts 5793 (cond_code, cond, cop1, NULL)) 5794 != UNKNOWN)) 5795 x = simplify_gen_unary (NEG, mode, 5796 simplify_gen_relational (reversed, 5797 mode, VOIDmode, 5798 cond, cop1), 5799 mode); 5800 else 5801 return gen_rtx_IF_THEN_ELSE (mode, 5802 simplify_gen_relational (cond_code, 5803 mode, 5804 VOIDmode, 5805 cond, 5806 cop1), 5807 true_rtx, false_rtx); 5808 5809 code = GET_CODE (x); 5810 op0_mode = VOIDmode; 5811 } 5812 } 5813 } 5814 5815 /* First see if we can apply the inverse distributive law. */ 5816 if (code == PLUS || code == MINUS 5817 || code == AND || code == IOR || code == XOR) 5818 { 5819 x = apply_distributive_law (x); 5820 code = GET_CODE (x); 5821 op0_mode = VOIDmode; 5822 } 5823 5824 /* If CODE is an associative operation not otherwise handled, see if we 5825 can associate some operands. This can win if they are constants or 5826 if they are logically related (i.e. (a & b) & a). */ 5827 if ((code == PLUS || code == MINUS || code == MULT || code == DIV 5828 || code == AND || code == IOR || code == XOR 5829 || code == SMAX || code == SMIN || code == UMAX || code == UMIN) 5830 && ((INTEGRAL_MODE_P (mode) && code != DIV) 5831 || (flag_associative_math && FLOAT_MODE_P (mode)))) 5832 { 5833 if (GET_CODE (XEXP (x, 0)) == code) 5834 { 5835 rtx other = XEXP (XEXP (x, 0), 0); 5836 rtx inner_op0 = XEXP (XEXP (x, 0), 1); 5837 rtx inner_op1 = XEXP (x, 1); 5838 rtx inner; 5839 5840 /* Make sure we pass the constant operand if any as the second 5841 one if this is a commutative operation. */ 5842 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x)) 5843 std::swap (inner_op0, inner_op1); 5844 inner = simplify_binary_operation (code == MINUS ? PLUS 5845 : code == DIV ? MULT 5846 : code, 5847 mode, inner_op0, inner_op1); 5848 5849 /* For commutative operations, try the other pair if that one 5850 didn't simplify. 
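That is, for ((A op B) op C) we first try to fold B with C, yielding (A op (B op C)); failing that, for a commutative op we fold A with C and yield (B op (A op C)). For example, ((X & 3) & 5) folds the two constants on the first attempt, giving (X & 1).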
*/ 5851 if (inner == 0 && COMMUTATIVE_ARITH_P (x)) 5852 { 5853 other = XEXP (XEXP (x, 0), 1); 5854 inner = simplify_binary_operation (code, mode, 5855 XEXP (XEXP (x, 0), 0), 5856 XEXP (x, 1)); 5857 } 5858 5859 if (inner) 5860 return simplify_gen_binary (code, mode, other, inner); 5861 } 5862 } 5863 5864 /* A little bit of algebraic simplification here. */ 5865 switch (code) 5866 { 5867 case MEM: 5868 /* Ensure that our address has any ASHIFTs converted to MULT in case 5869 address-recognizing predicates are called later. */ 5870 temp = make_compound_operation (XEXP (x, 0), MEM); 5871 SUBST (XEXP (x, 0), temp); 5872 break; 5873 5874 case SUBREG: 5875 if (op0_mode == VOIDmode) 5876 op0_mode = GET_MODE (SUBREG_REG (x)); 5877 5878 /* See if this can be moved to simplify_subreg. */ 5879 if (CONSTANT_P (SUBREG_REG (x)) 5880 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x)) 5881 /* Don't call gen_lowpart if the inner mode 5882 is VOIDmode and we cannot simplify it, as SUBREG without 5883 inner mode is invalid. */ 5884 && (GET_MODE (SUBREG_REG (x)) != VOIDmode 5885 || gen_lowpart_common (mode, SUBREG_REG (x)))) 5886 return gen_lowpart (mode, SUBREG_REG (x)); 5887 5888 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC) 5889 break; 5890 { 5891 rtx temp; 5892 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode, 5893 SUBREG_BYTE (x)); 5894 if (temp) 5895 return temp; 5896 5897 /* If op is known to have all lower bits zero, the result is zero. */ 5898 scalar_int_mode int_mode, int_op0_mode; 5899 if (!in_dest 5900 && is_a <scalar_int_mode> (mode, &int_mode) 5901 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode) 5902 && (GET_MODE_PRECISION (int_mode) 5903 < GET_MODE_PRECISION (int_op0_mode)) 5904 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode), 5905 SUBREG_BYTE (x)) 5906 && HWI_COMPUTABLE_MODE_P (int_op0_mode) 5907 && (nonzero_bits (SUBREG_REG (x), int_op0_mode) 5908 & GET_MODE_MASK (int_mode)) == 0) 5909 return CONST0_RTX (int_mode); 5910 } 5911 5912 /* Don't change the mode of the MEM if that would change the meaning 5913 of the address. */ 5914 if (MEM_P (SUBREG_REG (x)) 5915 && (MEM_VOLATILE_P (SUBREG_REG (x)) 5916 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0), 5917 MEM_ADDR_SPACE (SUBREG_REG (x))))) 5918 return gen_rtx_CLOBBER (mode, const0_rtx); 5919 5920 /* Note that we cannot do any narrowing for non-constants since 5921 we might have been counting on using the fact that some bits were 5922 zero. We now do this in the SET. */ 5923 5924 break; 5925 5926 case NEG: 5927 temp = expand_compound_operation (XEXP (x, 0)); 5928 5929 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be 5930 replaced by (lshiftrt X C). This will convert 5931 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */ 5932 5933 if (GET_CODE (temp) == ASHIFTRT 5934 && CONST_INT_P (XEXP (temp, 1)) 5935 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1) 5936 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0), 5937 INTVAL (XEXP (temp, 1))); 5938 5939 /* If X has only a single bit that might be nonzero, say, bit I, convert 5940 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of 5941 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to 5942 (sign_extract X 1 Y). But only do this if TEMP isn't a register 5943 or a SUBREG of one since we'd be making the expression more 5944 complex if it was just a register. */ 5945 5946 if (!REG_P (temp) 5947 && ! 
(GET_CODE (temp) == SUBREG 5948 && REG_P (SUBREG_REG (temp))) 5949 && is_a <scalar_int_mode> (mode, &int_mode) 5950 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0) 5951 { 5952 rtx temp1 = simplify_shift_const 5953 (NULL_RTX, ASHIFTRT, int_mode, 5954 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp, 5955 GET_MODE_PRECISION (int_mode) - 1 - i), 5956 GET_MODE_PRECISION (int_mode) - 1 - i); 5957 5958 /* If all we did was surround TEMP with the two shifts, we 5959 haven't improved anything, so don't use it. Otherwise, 5960 we are better off with TEMP1. */ 5961 if (GET_CODE (temp1) != ASHIFTRT 5962 || GET_CODE (XEXP (temp1, 0)) != ASHIFT 5963 || XEXP (XEXP (temp1, 0), 0) != temp) 5964 return temp1; 5965 } 5966 break; 5967 5968 case TRUNCATE: 5969 /* We can't handle truncation to a partial integer mode here 5970 because we don't know the real bitsize of the partial 5971 integer mode. */ 5972 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) 5973 break; 5974 5975 if (HWI_COMPUTABLE_MODE_P (mode)) 5976 SUBST (XEXP (x, 0), 5977 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)), 5978 GET_MODE_MASK (mode), 0)); 5979 5980 /* We can truncate a constant value and return it. */ 5981 if (CONST_INT_P (XEXP (x, 0))) 5982 return gen_int_mode (INTVAL (XEXP (x, 0)), mode); 5983 5984 /* Similarly to what we do in simplify-rtx.c, a truncate of a register 5985 whose value is a comparison can be replaced with a subreg if 5986 STORE_FLAG_VALUE permits. */ 5987 if (HWI_COMPUTABLE_MODE_P (mode) 5988 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0 5989 && (temp = get_last_value (XEXP (x, 0))) 5990 && COMPARISON_P (temp)) 5991 return gen_lowpart (mode, XEXP (x, 0)); 5992 break; 5993 5994 case CONST: 5995 /* (const (const X)) can become (const X). Do it this way rather than 5996 returning the inner CONST since CONST can be shared with a 5997 REG_EQUAL note. */ 5998 if (GET_CODE (XEXP (x, 0)) == CONST) 5999 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); 6000 break; 6001 6002 case LO_SUM: 6003 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we 6004 can add in an offset. find_split_point will split this address up 6005 again if it doesn't match. */ 6006 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH 6007 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1))) 6008 return XEXP (x, 1); 6009 break; 6010 6011 case PLUS: 6012 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>) 6013 when c is (const_int (pow2 + 1) / 2) is a sign extension of a 6014 bit-field and can be replaced by either a sign_extend or a 6015 sign_extract. The `and' may be a zero_extend and the two 6016 <c>, -<c> constants may be reversed. 
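For example, with an 8-bit field in SImode: (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128)) is a sign extension of the low byte of X and is rewritten below as (ashiftrt (ashift X (const_int 24)) (const_int 24)).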
*/ 6017 if (GET_CODE (XEXP (x, 0)) == XOR 6018 && is_a <scalar_int_mode> (mode, &int_mode) 6019 && CONST_INT_P (XEXP (x, 1)) 6020 && CONST_INT_P (XEXP (XEXP (x, 0), 1)) 6021 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1)) 6022 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0 6023 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0) 6024 && HWI_COMPUTABLE_MODE_P (int_mode) 6025 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND 6026 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) 6027 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)) 6028 == (HOST_WIDE_INT_1U << (i + 1)) - 1)) 6029 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND 6030 && known_eq ((GET_MODE_PRECISION 6031 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))), 6032 (unsigned int) i + 1)))) 6033 return simplify_shift_const 6034 (NULL_RTX, ASHIFTRT, int_mode, 6035 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, 6036 XEXP (XEXP (XEXP (x, 0), 0), 0), 6037 GET_MODE_PRECISION (int_mode) - (i + 1)), 6038 GET_MODE_PRECISION (int_mode) - (i + 1)); 6039 6040 /* If only the low-order bit of X is possibly nonzero, (plus x -1) 6041 can become (ashiftrt (ashift (xor x 1) C) C) where C is 6042 the bitsize of the mode - 1. This allows simplification of 6043 "a = (b & 8) == 0;" */ 6044 if (XEXP (x, 1) == constm1_rtx 6045 && !REG_P (XEXP (x, 0)) 6046 && ! (GET_CODE (XEXP (x, 0)) == SUBREG 6047 && REG_P (SUBREG_REG (XEXP (x, 0)))) 6048 && is_a <scalar_int_mode> (mode, &int_mode) 6049 && nonzero_bits (XEXP (x, 0), int_mode) == 1) 6050 return simplify_shift_const 6051 (NULL_RTX, ASHIFTRT, int_mode, 6052 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, 6053 gen_rtx_XOR (int_mode, XEXP (x, 0), 6054 const1_rtx), 6055 GET_MODE_PRECISION (int_mode) - 1), 6056 GET_MODE_PRECISION (int_mode) - 1); 6057 6058 /* If we are adding two things that have no bits in common, convert 6059 the addition into an IOR. This will often be further simplified, 6060 for example in cases like ((a & 1) + (a & 2)), which can 6061 become a & 3. */ 6062 6063 if (HWI_COMPUTABLE_MODE_P (mode) 6064 && (nonzero_bits (XEXP (x, 0), mode) 6065 & nonzero_bits (XEXP (x, 1), mode)) == 0) 6066 { 6067 /* Try to simplify the expression further. */ 6068 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1)); 6069 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0); 6070 6071 /* If we could, great. If not, do not go ahead with the IOR 6072 replacement, since PLUS appears in many special purpose 6073 address arithmetic instructions. */ 6074 if (GET_CODE (temp) != CLOBBER 6075 && (GET_CODE (temp) != IOR 6076 || ((XEXP (temp, 0) != XEXP (x, 0) 6077 || XEXP (temp, 1) != XEXP (x, 1)) 6078 && (XEXP (temp, 0) != XEXP (x, 1) 6079 || XEXP (temp, 1) != XEXP (x, 0))))) 6080 return temp; 6081 } 6082 6083 /* Canonicalize x + x into x << 1. 
*/ 6084 if (GET_MODE_CLASS (mode) == MODE_INT 6085 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1)) 6086 && !side_effects_p (XEXP (x, 0))) 6087 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx); 6088 6089 break; 6090 6091 case MINUS: 6092 /* (minus <foo> (and <foo> (const_int -pow2))) becomes 6093 (and <foo> (const_int pow2-1)) */ 6094 if (is_a <scalar_int_mode> (mode, &int_mode) 6095 && GET_CODE (XEXP (x, 1)) == AND 6096 && CONST_INT_P (XEXP (XEXP (x, 1), 1)) 6097 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1))) 6098 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0))) 6099 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0), 6100 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1); 6101 break; 6102 6103 case MULT: 6104 /* If we have (mult (plus A B) C), apply the distributive law and then 6105 the inverse distributive law to see if things simplify. This 6106 occurs mostly in addresses, often when unrolling loops. */ 6107 6108 if (GET_CODE (XEXP (x, 0)) == PLUS) 6109 { 6110 rtx result = distribute_and_simplify_rtx (x, 0); 6111 if (result) 6112 return result; 6113 } 6114 6115 /* Try to simplify a*(b/c) as (a*b)/c. */ 6116 if (FLOAT_MODE_P (mode) && flag_associative_math 6117 && GET_CODE (XEXP (x, 0)) == DIV) 6118 { 6119 rtx tem = simplify_binary_operation (MULT, mode, 6120 XEXP (XEXP (x, 0), 0), 6121 XEXP (x, 1)); 6122 if (tem) 6123 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1)); 6124 } 6125 break; 6126 6127 case UDIV: 6128 /* If this is a divide by a power of two, treat it as a shift if 6129 its first operand is a shift. */ 6130 if (is_a <scalar_int_mode> (mode, &int_mode) 6131 && CONST_INT_P (XEXP (x, 1)) 6132 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0 6133 && (GET_CODE (XEXP (x, 0)) == ASHIFT 6134 || GET_CODE (XEXP (x, 0)) == LSHIFTRT 6135 || GET_CODE (XEXP (x, 0)) == ASHIFTRT 6136 || GET_CODE (XEXP (x, 0)) == ROTATE 6137 || GET_CODE (XEXP (x, 0)) == ROTATERT)) 6138 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode, 6139 XEXP (x, 0), i); 6140 break; 6141 6142 case EQ: case NE: 6143 case GT: case GTU: case GE: case GEU: 6144 case LT: case LTU: case LE: case LEU: 6145 case UNEQ: case LTGT: 6146 case UNGT: case UNGE: 6147 case UNLT: case UNLE: 6148 case UNORDERED: case ORDERED: 6149 /* If the first operand is a condition code, we can't do anything 6150 with it. */ 6151 if (GET_CODE (XEXP (x, 0)) == COMPARE 6152 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC 6153 && ! CC0_P (XEXP (x, 0)))) 6154 { 6155 rtx op0 = XEXP (x, 0); 6156 rtx op1 = XEXP (x, 1); 6157 enum rtx_code new_code; 6158 6159 if (GET_CODE (op0) == COMPARE) 6160 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0); 6161 6162 /* Simplify our comparison, if possible. */ 6163 new_code = simplify_comparison (code, &op0, &op1); 6164 6165 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X 6166 if only the low-order bit is possibly nonzero in X (such as when 6167 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to 6168 (xor X 1) or (minus 1 X); we use the former. Finally, if X is 6169 known to be either 0 or -1, NE becomes a NEG and EQ becomes 6170 (plus X 1). 6171 6172 Remove any ZERO_EXTRACT we made when thinking this was a 6173 comparison. It may now be simpler to use, e.g., an AND. If a 6174 ZERO_EXTRACT is indeed appropriate, it will be placed back by 6175 the call to make_compound_operation in the SET case. 6176 6177 Don't apply these optimizations if the caller would 6178 prefer a comparison rather than a value.
6179 E.g., for the condition in an IF_THEN_ELSE most targets need 6180 an explicit comparison. */ 6181 6182 if (in_cond) 6183 ; 6184 6185 else if (STORE_FLAG_VALUE == 1 6186 && new_code == NE 6187 && is_int_mode (mode, &int_mode) 6188 && op1 == const0_rtx 6189 && int_mode == GET_MODE (op0) 6190 && nonzero_bits (op0, int_mode) == 1) 6191 return gen_lowpart (int_mode, 6192 expand_compound_operation (op0)); 6193 6194 else if (STORE_FLAG_VALUE == 1 6195 && new_code == NE 6196 && is_int_mode (mode, &int_mode) 6197 && op1 == const0_rtx 6198 && int_mode == GET_MODE (op0) 6199 && (num_sign_bit_copies (op0, int_mode) 6200 == GET_MODE_PRECISION (int_mode))) 6201 { 6202 op0 = expand_compound_operation (op0); 6203 return simplify_gen_unary (NEG, int_mode, 6204 gen_lowpart (int_mode, op0), 6205 int_mode); 6206 } 6207 6208 else if (STORE_FLAG_VALUE == 1 6209 && new_code == EQ 6210 && is_int_mode (mode, &int_mode) 6211 && op1 == const0_rtx 6212 && int_mode == GET_MODE (op0) 6213 && nonzero_bits (op0, int_mode) == 1) 6214 { 6215 op0 = expand_compound_operation (op0); 6216 return simplify_gen_binary (XOR, int_mode, 6217 gen_lowpart (int_mode, op0), 6218 const1_rtx); 6219 } 6220 6221 else if (STORE_FLAG_VALUE == 1 6222 && new_code == EQ 6223 && is_int_mode (mode, &int_mode) 6224 && op1 == const0_rtx 6225 && int_mode == GET_MODE (op0) 6226 && (num_sign_bit_copies (op0, int_mode) 6227 == GET_MODE_PRECISION (int_mode))) 6228 { 6229 op0 = expand_compound_operation (op0); 6230 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1); 6231 } 6232 6233 /* If STORE_FLAG_VALUE is -1, we have cases similar to 6234 those above. */ 6235 if (in_cond) 6236 ; 6237 6238 else if (STORE_FLAG_VALUE == -1 6239 && new_code == NE 6240 && is_int_mode (mode, &int_mode) 6241 && op1 == const0_rtx 6242 && int_mode == GET_MODE (op0) 6243 && (num_sign_bit_copies (op0, int_mode) 6244 == GET_MODE_PRECISION (int_mode))) 6245 return gen_lowpart (int_mode, expand_compound_operation (op0)); 6246 6247 else if (STORE_FLAG_VALUE == -1 6248 && new_code == NE 6249 && is_int_mode (mode, &int_mode) 6250 && op1 == const0_rtx 6251 && int_mode == GET_MODE (op0) 6252 && nonzero_bits (op0, int_mode) == 1) 6253 { 6254 op0 = expand_compound_operation (op0); 6255 return simplify_gen_unary (NEG, int_mode, 6256 gen_lowpart (int_mode, op0), 6257 int_mode); 6258 } 6259 6260 else if (STORE_FLAG_VALUE == -1 6261 && new_code == EQ 6262 && is_int_mode (mode, &int_mode) 6263 && op1 == const0_rtx 6264 && int_mode == GET_MODE (op0) 6265 && (num_sign_bit_copies (op0, int_mode) 6266 == GET_MODE_PRECISION (int_mode))) 6267 { 6268 op0 = expand_compound_operation (op0); 6269 return simplify_gen_unary (NOT, int_mode, 6270 gen_lowpart (int_mode, op0), 6271 int_mode); 6272 } 6273 6274 /* If X is 0/1, (eq X 0) is X-1. */ 6275 else if (STORE_FLAG_VALUE == -1 6276 && new_code == EQ 6277 && is_int_mode (mode, &int_mode) 6278 && op1 == const0_rtx 6279 && int_mode == GET_MODE (op0) 6280 && nonzero_bits (op0, int_mode) == 1) 6281 { 6282 op0 = expand_compound_operation (op0); 6283 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1); 6284 } 6285 6286 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just 6287 one bit that might be nonzero, we can convert (ne x 0) to 6288 (ashift x c) where C puts the bit in the sign bit. Remove any 6289 AND with STORE_FLAG_VALUE when we are done, since we are only 6290 going to test the sign bit. 
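For example, if only bit 3 of X can be nonzero in SImode, (ne X (const_int 0)) becomes (ashift X (const_int 28)), which moves that bit into the sign position.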
*/ 6291 if (new_code == NE 6292 && is_int_mode (mode, &int_mode) 6293 && HWI_COMPUTABLE_MODE_P (int_mode) 6294 && val_signbit_p (int_mode, STORE_FLAG_VALUE) 6295 && op1 == const0_rtx 6296 && int_mode == GET_MODE (op0) 6297 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0) 6298 { 6299 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode, 6300 expand_compound_operation (op0), 6301 GET_MODE_PRECISION (int_mode) - 1 - i); 6302 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx) 6303 return XEXP (x, 0); 6304 else 6305 return x; 6306 } 6307 6308 /* If the code changed, return a whole new comparison. 6309 We also need to avoid using SUBST in cases where 6310 simplify_comparison has widened a comparison with a CONST_INT, 6311 since in that case the wider CONST_INT may fail the sanity 6312 checks in do_SUBST. */ 6313 if (new_code != code 6314 || (CONST_INT_P (op1) 6315 && GET_MODE (op0) != GET_MODE (XEXP (x, 0)) 6316 && GET_MODE (op0) != GET_MODE (XEXP (x, 1)))) 6317 return gen_rtx_fmt_ee (new_code, mode, op0, op1); 6318 6319 /* Otherwise, keep this operation, but maybe change its operands. 6320 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */ 6321 SUBST (XEXP (x, 0), op0); 6322 SUBST (XEXP (x, 1), op1); 6323 } 6324 break; 6325 6326 case IF_THEN_ELSE: 6327 return simplify_if_then_else (x); 6328 6329 case ZERO_EXTRACT: 6330 case SIGN_EXTRACT: 6331 case ZERO_EXTEND: 6332 case SIGN_EXTEND: 6333 /* If we are processing SET_DEST, we are done. */ 6334 if (in_dest) 6335 return x; 6336 6337 return expand_compound_operation (x); 6338 6339 case SET: 6340 return simplify_set (x); 6341 6342 case AND: 6343 case IOR: 6344 return simplify_logical (x); 6345 6346 case ASHIFT: 6347 case LSHIFTRT: 6348 case ASHIFTRT: 6349 case ROTATE: 6350 case ROTATERT: 6351 /* If this is a shift by a constant amount, simplify it. */ 6352 if (CONST_INT_P (XEXP (x, 1))) 6353 return simplify_shift_const (x, code, mode, XEXP (x, 0), 6354 INTVAL (XEXP (x, 1))); 6355 6356 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1))) 6357 SUBST (XEXP (x, 1), 6358 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)), 6359 (HOST_WIDE_INT_1U 6360 << exact_log2 (GET_MODE_UNIT_BITSIZE 6361 (GET_MODE (x)))) 6362 - 1, 6363 0)); 6364 break; 6365 6366 default: 6367 break; 6368 } 6369 6370 return x; 6371 } 6372 6373 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */ 6374 6375 static rtx 6376 simplify_if_then_else (rtx x) 6377 { 6378 machine_mode mode = GET_MODE (x); 6379 rtx cond = XEXP (x, 0); 6380 rtx true_rtx = XEXP (x, 1); 6381 rtx false_rtx = XEXP (x, 2); 6382 enum rtx_code true_code = GET_CODE (cond); 6383 int comparison_p = COMPARISON_P (cond); 6384 rtx temp; 6385 int i; 6386 enum rtx_code false_code; 6387 rtx reversed; 6388 scalar_int_mode int_mode, inner_mode; 6389 6390 /* Simplify storing of the truth value. */ 6391 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx) 6392 return simplify_gen_relational (true_code, mode, VOIDmode, 6393 XEXP (cond, 0), XEXP (cond, 1)); 6394 6395 /* Also when the truth value has to be reversed. */ 6396 if (comparison_p 6397 && true_rtx == const0_rtx && false_rtx == const_true_rtx 6398 && (reversed = reversed_comparison (cond, mode))) 6399 return reversed; 6400 6401 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used 6402 in it is being compared against certain values. Get the true and false 6403 comparisons and see if that says anything about the value of each arm. 
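For example, in (if_then_else (eq A (const_int 0)) (plus A B) C) the true arm is only evaluated when A is zero, so it can be simplified to just B.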
*/ 6404 6405 if (comparison_p 6406 && ((false_code = reversed_comparison_code (cond, NULL)) 6407 != UNKNOWN) 6408 && REG_P (XEXP (cond, 0))) 6409 { 6410 HOST_WIDE_INT nzb; 6411 rtx from = XEXP (cond, 0); 6412 rtx true_val = XEXP (cond, 1); 6413 rtx false_val = true_val; 6414 int swapped = 0; 6415 6416 /* If FALSE_CODE is EQ, swap the codes and arms. */ 6417 6418 if (false_code == EQ) 6419 { 6420 swapped = 1, true_code = EQ, false_code = NE; 6421 std::swap (true_rtx, false_rtx); 6422 } 6423 6424 scalar_int_mode from_mode; 6425 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode)) 6426 { 6427 /* If we are comparing against zero and the expression being 6428 tested has only a single bit that might be nonzero, that is 6429 its value when it is not equal to zero. Similarly if it is 6430 known to be -1 or 0. */ 6431 if (true_code == EQ 6432 && true_val == const0_rtx 6433 && pow2p_hwi (nzb = nonzero_bits (from, from_mode))) 6434 { 6435 false_code = EQ; 6436 false_val = gen_int_mode (nzb, from_mode); 6437 } 6438 else if (true_code == EQ 6439 && true_val == const0_rtx 6440 && (num_sign_bit_copies (from, from_mode) 6441 == GET_MODE_PRECISION (from_mode))) 6442 { 6443 false_code = EQ; 6444 false_val = constm1_rtx; 6445 } 6446 } 6447 6448 /* Now simplify an arm if we know the value of the register in the 6449 branch and it is used in the arm. Be careful due to the potential 6450 of locally-shared RTL. */ 6451 6452 if (reg_mentioned_p (from, true_rtx)) 6453 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code, 6454 from, true_val), 6455 pc_rtx, pc_rtx, 0, 0, 0); 6456 if (reg_mentioned_p (from, false_rtx)) 6457 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code, 6458 from, false_val), 6459 pc_rtx, pc_rtx, 0, 0, 0); 6460 6461 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx); 6462 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx); 6463 6464 true_rtx = XEXP (x, 1); 6465 false_rtx = XEXP (x, 2); 6466 true_code = GET_CODE (cond); 6467 } 6468 6469 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be 6470 reversed, do so to avoid needing two sets of patterns for 6471 subtract-and-branch insns. Similarly if we have a constant in the true 6472 arm, the false arm is the same as the first operand of the comparison, or 6473 the false arm is more complicated than the true arm. */ 6474 6475 if (comparison_p 6476 && reversed_comparison_code (cond, NULL) != UNKNOWN 6477 && (true_rtx == pc_rtx 6478 || (CONSTANT_P (true_rtx) 6479 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx) 6480 || true_rtx == const0_rtx 6481 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx)) 6482 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx)) 6483 && !OBJECT_P (false_rtx)) 6484 || reg_mentioned_p (true_rtx, false_rtx) 6485 || rtx_equal_p (false_rtx, XEXP (cond, 0)))) 6486 { 6487 true_code = reversed_comparison_code (cond, NULL); 6488 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond))); 6489 SUBST (XEXP (x, 1), false_rtx); 6490 SUBST (XEXP (x, 2), true_rtx); 6491 6492 std::swap (true_rtx, false_rtx); 6493 cond = XEXP (x, 0); 6494 6495 /* It is possible that the conditional has been simplified out. */ 6496 true_code = GET_CODE (cond); 6497 comparison_p = COMPARISON_P (cond); 6498 } 6499 6500 /* If the two arms are identical, we don't need the comparison. */ 6501 6502 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond)) 6503 return true_rtx; 6504 6505 /* Convert a == b ? b : a to "a". */ 6506 if (true_code == EQ && ! 
side_effects_p (cond) 6507 && !HONOR_NANS (mode) 6508 && rtx_equal_p (XEXP (cond, 0), false_rtx) 6509 && rtx_equal_p (XEXP (cond, 1), true_rtx)) 6510 return false_rtx; 6511 else if (true_code == NE && ! side_effects_p (cond) 6512 && !HONOR_NANS (mode) 6513 && rtx_equal_p (XEXP (cond, 0), true_rtx) 6514 && rtx_equal_p (XEXP (cond, 1), false_rtx)) 6515 return true_rtx; 6516 6517 /* Look for cases where we have (abs x) or (neg (abs X)). */ 6518 6519 if (GET_MODE_CLASS (mode) == MODE_INT 6520 && comparison_p 6521 && XEXP (cond, 1) == const0_rtx 6522 && GET_CODE (false_rtx) == NEG 6523 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0)) 6524 && rtx_equal_p (true_rtx, XEXP (cond, 0)) 6525 && ! side_effects_p (true_rtx)) 6526 switch (true_code) 6527 { 6528 case GT: 6529 case GE: 6530 return simplify_gen_unary (ABS, mode, true_rtx, mode); 6531 case LT: 6532 case LE: 6533 return 6534 simplify_gen_unary (NEG, mode, 6535 simplify_gen_unary (ABS, mode, true_rtx, mode), 6536 mode); 6537 default: 6538 break; 6539 } 6540 6541 /* Look for MIN or MAX. */ 6542 6543 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) 6544 && comparison_p 6545 && rtx_equal_p (XEXP (cond, 0), true_rtx) 6546 && rtx_equal_p (XEXP (cond, 1), false_rtx) 6547 && ! side_effects_p (cond)) 6548 switch (true_code) 6549 { 6550 case GE: 6551 case GT: 6552 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx); 6553 case LE: 6554 case LT: 6555 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx); 6556 case GEU: 6557 case GTU: 6558 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx); 6559 case LEU: 6560 case LTU: 6561 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx); 6562 default: 6563 break; 6564 } 6565 6566 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its 6567 second operand is zero, this can be done as (OP Z (mult COND C2)) where 6568 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or 6569 SIGN_EXTEND as long as Z is already extended (so we don't destroy it). 6570 We can do this kind of thing in some cases when STORE_FLAG_VALUE is 6571 neither 1 nor -1, but it isn't worth checking for. */ 6572 6573 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) 6574 && comparison_p 6575 && is_int_mode (mode, &int_mode) 6576 && ! side_effects_p (x)) 6577 { 6578 rtx t = make_compound_operation (true_rtx, SET); 6579 rtx f = make_compound_operation (false_rtx, SET); 6580 rtx cond_op0 = XEXP (cond, 0); 6581 rtx cond_op1 = XEXP (cond, 1); 6582 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN; 6583 scalar_int_mode m = int_mode; 6584 rtx z = 0, c1 = NULL_RTX; 6585 6586 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS 6587 || GET_CODE (t) == IOR || GET_CODE (t) == XOR 6588 || GET_CODE (t) == ASHIFT 6589 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT) 6590 && rtx_equal_p (XEXP (t, 0), f)) 6591 c1 = XEXP (t, 1), op = GET_CODE (t), z = f; 6592 6593 /* If an identity-zero op is commutative, check whether there 6594 would be a match if we swapped the operands.
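so that e.g. (if_then_else COND (plus C1 Z) Z) is treated like (if_then_else COND (plus Z C1) Z); with a STORE_FLAG_VALUE of 1 either form becomes roughly (plus Z (mult COND C1)) below.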
*/ 6595 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR 6596 || GET_CODE (t) == XOR) 6597 && rtx_equal_p (XEXP (t, 1), f)) 6598 c1 = XEXP (t, 0), op = GET_CODE (t), z = f; 6599 else if (GET_CODE (t) == SIGN_EXTEND 6600 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode) 6601 && (GET_CODE (XEXP (t, 0)) == PLUS 6602 || GET_CODE (XEXP (t, 0)) == MINUS 6603 || GET_CODE (XEXP (t, 0)) == IOR 6604 || GET_CODE (XEXP (t, 0)) == XOR 6605 || GET_CODE (XEXP (t, 0)) == ASHIFT 6606 || GET_CODE (XEXP (t, 0)) == LSHIFTRT 6607 || GET_CODE (XEXP (t, 0)) == ASHIFTRT) 6608 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG 6609 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) 6610 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) 6611 && (num_sign_bit_copies (f, GET_MODE (f)) 6612 > (unsigned int) 6613 (GET_MODE_PRECISION (int_mode) 6614 - GET_MODE_PRECISION (inner_mode)))) 6615 { 6616 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); 6617 extend_op = SIGN_EXTEND; 6618 m = inner_mode; 6619 } 6620 else if (GET_CODE (t) == SIGN_EXTEND 6621 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode) 6622 && (GET_CODE (XEXP (t, 0)) == PLUS 6623 || GET_CODE (XEXP (t, 0)) == IOR 6624 || GET_CODE (XEXP (t, 0)) == XOR) 6625 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG 6626 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) 6627 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) 6628 && (num_sign_bit_copies (f, GET_MODE (f)) 6629 > (unsigned int) 6630 (GET_MODE_PRECISION (int_mode) 6631 - GET_MODE_PRECISION (inner_mode)))) 6632 { 6633 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); 6634 extend_op = SIGN_EXTEND; 6635 m = inner_mode; 6636 } 6637 else if (GET_CODE (t) == ZERO_EXTEND 6638 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode) 6639 && (GET_CODE (XEXP (t, 0)) == PLUS 6640 || GET_CODE (XEXP (t, 0)) == MINUS 6641 || GET_CODE (XEXP (t, 0)) == IOR 6642 || GET_CODE (XEXP (t, 0)) == XOR 6643 || GET_CODE (XEXP (t, 0)) == ASHIFT 6644 || GET_CODE (XEXP (t, 0)) == LSHIFTRT 6645 || GET_CODE (XEXP (t, 0)) == ASHIFTRT) 6646 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG 6647 && HWI_COMPUTABLE_MODE_P (int_mode) 6648 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) 6649 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) 6650 && ((nonzero_bits (f, GET_MODE (f)) 6651 & ~GET_MODE_MASK (inner_mode)) 6652 == 0)) 6653 { 6654 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); 6655 extend_op = ZERO_EXTEND; 6656 m = inner_mode; 6657 } 6658 else if (GET_CODE (t) == ZERO_EXTEND 6659 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode) 6660 && (GET_CODE (XEXP (t, 0)) == PLUS 6661 || GET_CODE (XEXP (t, 0)) == IOR 6662 || GET_CODE (XEXP (t, 0)) == XOR) 6663 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG 6664 && HWI_COMPUTABLE_MODE_P (int_mode) 6665 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) 6666 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) 6667 && ((nonzero_bits (f, GET_MODE (f)) 6668 & ~GET_MODE_MASK (inner_mode)) 6669 == 0)) 6670 { 6671 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); 6672 extend_op = ZERO_EXTEND; 6673 m = inner_mode; 6674 } 6675 6676 if (z) 6677 { 6678 machine_mode cm = m; 6679 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT) 6680 && GET_MODE (c1) != VOIDmode) 6681 cm = GET_MODE (c1); 6682 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode, 6683 cond_op0, cond_op1), 6684 pc_rtx, pc_rtx, 0, 0, 0); 6685 temp = simplify_gen_binary (MULT, cm, temp, 6686 simplify_gen_binary (MULT, cm, c1, 6687 
const_true_rtx)); 6688 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0); 6689 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp); 6690 6691 if (extend_op != UNKNOWN) 6692 temp = simplify_gen_unary (extend_op, int_mode, temp, m); 6693 6694 return temp; 6695 } 6696 } 6697 6698 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or 6699 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the 6700 negation of a single bit, we can convert this operation to a shift. We 6701 can actually do this more generally, but it doesn't seem worth it. */ 6702 6703 if (true_code == NE 6704 && is_a <scalar_int_mode> (mode, &int_mode) 6705 && XEXP (cond, 1) == const0_rtx 6706 && false_rtx == const0_rtx 6707 && CONST_INT_P (true_rtx) 6708 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1 6709 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0) 6710 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode) 6711 == GET_MODE_PRECISION (int_mode)) 6712 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0))) 6713 return 6714 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, 6715 gen_lowpart (int_mode, XEXP (cond, 0)), i); 6716 6717 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only 6718 non-zero bit in A is C1. */ 6719 if (true_code == NE && XEXP (cond, 1) == const0_rtx 6720 && false_rtx == const0_rtx && CONST_INT_P (true_rtx) 6721 && is_a <scalar_int_mode> (mode, &int_mode) 6722 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode) 6723 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode)) 6724 == nonzero_bits (XEXP (cond, 0), inner_mode) 6725 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0) 6726 { 6727 rtx val = XEXP (cond, 0); 6728 if (inner_mode == int_mode) 6729 return val; 6730 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode)) 6731 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode); 6732 } 6733 6734 return x; 6735 } 6736 6737 /* Simplify X, a SET expression. Return the new expression. */ 6738 6739 static rtx 6740 simplify_set (rtx x) 6741 { 6742 rtx src = SET_SRC (x); 6743 rtx dest = SET_DEST (x); 6744 machine_mode mode 6745 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest); 6746 rtx_insn *other_insn; 6747 rtx *cc_use; 6748 scalar_int_mode int_mode; 6749 6750 /* (set (pc) (return)) gets written as (return). */ 6751 if (GET_CODE (dest) == PC && ANY_RETURN_P (src)) 6752 return src; 6753 6754 /* Now that we know for sure which bits of SRC we are using, see if we can 6755 simplify the expression for the object knowing that we only need the 6756 low-order bits. */ 6757 6758 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode)) 6759 { 6760 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0); 6761 SUBST (SET_SRC (x), src); 6762 } 6763 6764 /* If we are setting CC0 or if the source is a COMPARE, look for the use of 6765 the comparison result and try to simplify it unless we already have used 6766 undobuf.other_insn. 
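The typical shape is (set (cc0) (compare A B)) or a COMPARE copied into a flags register, whose single use is the condition of a following branch; OTHER_INSN then names that branch so that any change to the condition can be recorded against it.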
*/
6767 if ((GET_MODE_CLASS (mode) == MODE_CC
6768 || GET_CODE (src) == COMPARE
6769 || CC0_P (dest))
6770 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6771 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6772 && COMPARISON_P (*cc_use)
6773 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6774 {
6775 enum rtx_code old_code = GET_CODE (*cc_use);
6776 enum rtx_code new_code;
6777 rtx op0, op1, tmp;
6778 int other_changed = 0;
6779 rtx inner_compare = NULL_RTX;
6780 machine_mode compare_mode = GET_MODE (dest);
6781
6782 if (GET_CODE (src) == COMPARE)
6783 {
6784 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6785 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6786 {
6787 inner_compare = op0;
6788 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6789 }
6790 }
6791 else
6792 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6793
6794 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6795 op0, op1);
6796 if (!tmp)
6797 new_code = old_code;
6798 else if (!CONSTANT_P (tmp))
6799 {
6800 new_code = GET_CODE (tmp);
6801 op0 = XEXP (tmp, 0);
6802 op1 = XEXP (tmp, 1);
6803 }
6804 else
6805 {
6806 rtx pat = PATTERN (other_insn);
6807 undobuf.other_insn = other_insn;
6808 SUBST (*cc_use, tmp);
6809
6810 /* Attempt to simplify CC user. */
6811 if (GET_CODE (pat) == SET)
6812 {
6813 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6814 if (new_rtx != NULL_RTX)
6815 SUBST (SET_SRC (pat), new_rtx);
6816 }
6817
6818 /* Convert X into a no-op move. */
6819 SUBST (SET_DEST (x), pc_rtx);
6820 SUBST (SET_SRC (x), pc_rtx);
6821 return x;
6822 }
6823
6824 /* Simplify our comparison, if possible. */
6825 new_code = simplify_comparison (new_code, &op0, &op1);
6826
6827 #ifdef SELECT_CC_MODE
6828 /* If this machine has CC modes other than CCmode, check to see if we
6829 need to use a different CC mode here. */
6830 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6831 compare_mode = GET_MODE (op0);
6832 else if (inner_compare
6833 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6834 && new_code == old_code
6835 && op0 == XEXP (inner_compare, 0)
6836 && op1 == XEXP (inner_compare, 1))
6837 compare_mode = GET_MODE (inner_compare);
6838 else
6839 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6840
6841 /* If the mode changed, we have to change SET_DEST, the mode in the
6842 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6843 a hard register, just build new versions with the proper mode. If it
6844 is a pseudo, we lose unless it is the only time we set the pseudo, in
6845 which case we can safely change its mode. */
6846 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6847 {
6848 if (can_change_dest_mode (dest, 0, compare_mode))
6849 {
6850 unsigned int regno = REGNO (dest);
6851 rtx new_dest;
6852
6853 if (regno < FIRST_PSEUDO_REGISTER)
6854 new_dest = gen_rtx_REG (compare_mode, regno);
6855 else
6856 {
6857 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6858 new_dest = regno_reg_rtx[regno];
6859 }
6860
6861 SUBST (SET_DEST (x), new_dest);
6862 SUBST (XEXP (*cc_use, 0), new_dest);
6863 other_changed = 1;
6864
6865 dest = new_dest;
6866 }
6867 }
6868 #endif /* SELECT_CC_MODE */
6869
6870 /* If the code changed, we have to build a new comparison in
6871 undobuf.other_insn.
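For instance, simplify_comparison canonicalizes (gtu X (const_int 0))
into (ne X (const_int 0)); OTHER_INSN must then test NE rather than GTU.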
*/
6872 if (new_code != old_code)
6873 {
6874 int other_changed_previously = other_changed;
6875 unsigned HOST_WIDE_INT mask;
6876 rtx old_cc_use = *cc_use;
6877
6878 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6879 dest, const0_rtx));
6880 other_changed = 1;
6881
6882 /* If the only change we made was to change an EQ into an NE or
6883 vice versa, OP0 has only one bit that might be nonzero, and OP1
6884 is zero, check if changing the user of the condition code will
6885 produce a valid insn. If it won't, we can keep the original code
6886 in that insn by surrounding our operation with an XOR. */
6887
6888 if (((old_code == NE && new_code == EQ)
6889 || (old_code == EQ && new_code == NE))
6890 && ! other_changed_previously && op1 == const0_rtx
6891 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6892 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6893 {
6894 rtx pat = PATTERN (other_insn), note = 0;
6895
6896 if ((recog_for_combine (&pat, other_insn, &note) < 0
6897 && ! check_asm_operands (pat)))
6898 {
6899 *cc_use = old_cc_use;
6900 other_changed = 0;
6901
6902 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6903 gen_int_mode (mask,
6904 GET_MODE (op0)));
6905 }
6906 }
6907 }
6908
6909 if (other_changed)
6910 undobuf.other_insn = other_insn;
6911
6912 /* Don't generate a compare of a CC with 0, just use that CC. */
6913 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6914 {
6915 SUBST (SET_SRC (x), op0);
6916 src = SET_SRC (x);
6917 }
6918 /* Otherwise, if we didn't previously have the same COMPARE we
6919 want, create it from scratch. */
6920 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6921 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6922 {
6923 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6924 src = SET_SRC (x);
6925 }
6926 }
6927 else
6928 {
6929 /* Get SET_SRC in a form where we have placed back any
6930 compound expressions. Then do the checks below. */
6931 src = make_compound_operation (src, SET);
6932 SUBST (SET_SRC (x), src);
6933 }
6934
6935 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6936 and X being a REG or (subreg (reg)), we may be able to convert this to
6937 (set (subreg:m2 x) (op)).
6938
6939 We can always do this if M1 is narrower than M2 because that means that
6940 we only care about the low bits of the result.
6941
6942 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6943 perform a narrower operation than requested since the high-order bits will
6944 be undefined. On machines where it is defined, this transformation is safe
6945 as long as M1 and M2 have the same number of words. */
6946
6947 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6948 && !OBJECT_P (SUBREG_REG (src))
6949 && (known_equal_after_align_up
6950 (GET_MODE_SIZE (GET_MODE (src)),
6951 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
6952 UNITS_PER_WORD))
6953 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6954 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6955 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6956 GET_MODE (SUBREG_REG (src)),
6957 GET_MODE (src)))
6958 && (REG_P (dest)
6959 || (GET_CODE (dest) == SUBREG
6960 && REG_P (SUBREG_REG (dest)))))
6961 {
6962 SUBST (SET_DEST (x),
6963 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6964 dest));
6965 SUBST (SET_SRC (x), SUBREG_REG (src));
6966
6967 src = SET_SRC (x), dest = SET_DEST (x);
6968 }
6969
6970 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6971 in SRC.
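For example, (set (cc0) (subreg:QI (reg:SI R) 0)) may become
(set (cc0) (reg:SI R)) when the QImode sign bit of R is known to be
clear (R stands for an arbitrary pseudo).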
*/ 6972 if (dest == cc0_rtx 6973 && partial_subreg_p (src) 6974 && subreg_lowpart_p (src)) 6975 { 6976 rtx inner = SUBREG_REG (src); 6977 machine_mode inner_mode = GET_MODE (inner); 6978 6979 /* Here we make sure that we don't have a sign bit on. */ 6980 if (val_signbit_known_clear_p (GET_MODE (src), 6981 nonzero_bits (inner, inner_mode))) 6982 { 6983 SUBST (SET_SRC (x), inner); 6984 src = SET_SRC (x); 6985 } 6986 } 6987 6988 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this 6989 would require a paradoxical subreg. Replace the subreg with a 6990 zero_extend to avoid the reload that would otherwise be required. 6991 Don't do this unless we have a scalar integer mode, otherwise the 6992 transformation is incorrect. */ 6993 6994 enum rtx_code extend_op; 6995 if (paradoxical_subreg_p (src) 6996 && MEM_P (SUBREG_REG (src)) 6997 && SCALAR_INT_MODE_P (GET_MODE (src)) 6998 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN) 6999 { 7000 SUBST (SET_SRC (x), 7001 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src))); 7002 7003 src = SET_SRC (x); 7004 } 7005 7006 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we 7007 are comparing an item known to be 0 or -1 against 0, use a logical 7008 operation instead. Check for one of the arms being an IOR of the other 7009 arm with some value. We compute three terms to be IOR'ed together. In 7010 practice, at most two will be nonzero. Then we do the IOR's. */ 7011 7012 if (GET_CODE (dest) != PC 7013 && GET_CODE (src) == IF_THEN_ELSE 7014 && is_int_mode (GET_MODE (src), &int_mode) 7015 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE) 7016 && XEXP (XEXP (src, 0), 1) == const0_rtx 7017 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0)) 7018 && (!HAVE_conditional_move 7019 || ! can_conditionally_move_p (int_mode)) 7020 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode) 7021 == GET_MODE_PRECISION (int_mode)) 7022 && ! side_effects_p (src)) 7023 { 7024 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE 7025 ? XEXP (src, 1) : XEXP (src, 2)); 7026 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE 7027 ? XEXP (src, 2) : XEXP (src, 1)); 7028 rtx term1 = const0_rtx, term2, term3; 7029 7030 if (GET_CODE (true_rtx) == IOR 7031 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx)) 7032 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx; 7033 else if (GET_CODE (true_rtx) == IOR 7034 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx)) 7035 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx; 7036 else if (GET_CODE (false_rtx) == IOR 7037 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)) 7038 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx; 7039 else if (GET_CODE (false_rtx) == IOR 7040 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx)) 7041 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx; 7042 7043 term2 = simplify_gen_binary (AND, int_mode, 7044 XEXP (XEXP (src, 0), 0), true_rtx); 7045 term3 = simplify_gen_binary (AND, int_mode, 7046 simplify_gen_unary (NOT, int_mode, 7047 XEXP (XEXP (src, 0), 0), 7048 int_mode), 7049 false_rtx); 7050 7051 SUBST (SET_SRC (x), 7052 simplify_gen_binary (IOR, int_mode, 7053 simplify_gen_binary (IOR, int_mode, 7054 term1, term2), 7055 term3)); 7056 7057 src = SET_SRC (x); 7058 } 7059 7060 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this 7061 whole thing fail. 
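(clobber (const_int 0)) is the conventional marker for an rtx that
could not be made, e.g. a failed gen_lowpart, so we propagate the
failure upward.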
*/
7062 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7063 return src;
7064 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7065 return dest;
7066 else
7067 /* Convert this into a field assignment operation, if possible. */
7068 return make_field_assignment (x);
7069 }
7070
7071 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7072 result. */
7073
7074 static rtx
7075 simplify_logical (rtx x)
7076 {
7077 rtx op0 = XEXP (x, 0);
7078 rtx op1 = XEXP (x, 1);
7079 scalar_int_mode mode;
7080
7081 switch (GET_CODE (x))
7082 {
7083 case AND:
7084 /* We can call simplify_and_const_int only if we don't lose
7085 any (sign) bits when converting INTVAL (op1) to
7086 "unsigned HOST_WIDE_INT". */
7087 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7088 && CONST_INT_P (op1)
7089 && (HWI_COMPUTABLE_MODE_P (mode)
7090 || INTVAL (op1) > 0))
7091 {
7092 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7093 if (GET_CODE (x) != AND)
7094 return x;
7095
7096 op0 = XEXP (x, 0);
7097 op1 = XEXP (x, 1);
7098 }
7099
7100 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7101 apply the distributive law and then the inverse distributive
7102 law to see if things simplify. */
7103 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7104 {
7105 rtx result = distribute_and_simplify_rtx (x, 0);
7106 if (result)
7107 return result;
7108 }
7109 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7110 {
7111 rtx result = distribute_and_simplify_rtx (x, 1);
7112 if (result)
7113 return result;
7114 }
7115 break;
7116
7117 case IOR:
7118 /* If we have (ior (and A B) C), apply the distributive law and then
7119 the inverse distributive law to see if things simplify. */
7120
7121 if (GET_CODE (op0) == AND)
7122 {
7123 rtx result = distribute_and_simplify_rtx (x, 0);
7124 if (result)
7125 return result;
7126 }
7127
7128 if (GET_CODE (op1) == AND)
7129 {
7130 rtx result = distribute_and_simplify_rtx (x, 1);
7131 if (result)
7132 return result;
7133 }
7134 break;
7135
7136 default:
7137 gcc_unreachable ();
7138 }
7139
7140 return x;
7141 }
7142
7143 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7144 operations" because they can be replaced with two more basic operations.
7145 ZERO_EXTEND is also considered "compound" because it can be replaced with
7146 an AND operation, which is simpler, though only one operation.
7147
7148 The function expand_compound_operation is called with an rtx expression
7149 and will convert it to the appropriate shifts and AND operations,
7150 simplifying at each stage.
7151
7152 The function make_compound_operation is called to convert an expression
7153 consisting of shifts and ANDs into the equivalent compound expression.
7154 It is the inverse of this function, loosely speaking. */
7155
7156 static rtx
7157 expand_compound_operation (rtx x)
7158 {
7159 unsigned HOST_WIDE_INT pos = 0, len;
7160 int unsignedp = 0;
7161 unsigned int modewidth;
7162 rtx tem;
7163 scalar_int_mode inner_mode;
7164
7165 switch (GET_CODE (x))
7166 {
7167 case ZERO_EXTEND:
7168 unsignedp = 1;
7169 /* FALLTHRU */
7170 case SIGN_EXTEND:
7171 /* We can't necessarily use a const_int for a multiword mode;
7172 it depends on implicitly extending the value.
7173 Since we don't know the right way to extend it,
7174 we can't tell whether the implicit way is right.
7175
7176 Even for a mode that is no wider than a const_int,
7177 we can't win, because we need to sign extend one of its bits through
7178 the rest of it, and we don't know which bit. */
7179 if (CONST_INT_P (XEXP (x, 0)))
7180 return x;
7181
7182 /* Reject modes that aren't scalar integers because turning vector
7183 or complex modes into shifts causes problems. */
7184 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7185 return x;
7186
7187 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7188 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7189 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7190 reloaded. If not for that, MEM's would very rarely be safe.
7191
7192 Reject modes bigger than a word, because we might not be able
7193 to reference a two-register group starting with an arbitrary register
7194 (and currently gen_lowpart might crash for a SUBREG). */
7195
7196 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7197 return x;
7198
7199 len = GET_MODE_PRECISION (inner_mode);
7200 /* If the inner object has VOIDmode (the only way this can happen
7201 is if it is an ASM_OPERANDS), we can't do anything since we don't
7202 know how much masking to do. */
7203 if (len == 0)
7204 return x;
7205
7206 break;
7207
7208 case ZERO_EXTRACT:
7209 unsignedp = 1;
7210
7211 /* fall through */
7212
7213 case SIGN_EXTRACT:
7214 /* If the operand is a CLOBBER, just return it. */
7215 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7216 return XEXP (x, 0);
7217
7218 if (!CONST_INT_P (XEXP (x, 1))
7219 || !CONST_INT_P (XEXP (x, 2)))
7220 return x;
7221
7222 /* Reject modes that aren't scalar integers because turning vector
7223 or complex modes into shifts causes problems. */
7224 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7225 return x;
7226
7227 len = INTVAL (XEXP (x, 1));
7228 pos = INTVAL (XEXP (x, 2));
7229
7230 /* The extraction should stay within the object being extracted;
fail otherwise. */
7231 if (len + pos > GET_MODE_PRECISION (inner_mode))
7232 return x;
7233
7234 if (BITS_BIG_ENDIAN)
7235 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7236
7237 break;
7238
7239 default:
7240 return x;
7241 }
7242
7243 /* We've rejected non-scalar operations by now. */
7244 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7245
7246 /* Convert sign extension to zero extension, if we know that the high
7247 bit is not set, as this is easier to optimize. It will be converted
7248 back to a cheaper alternative in make_extraction. */
7249 if (GET_CODE (x) == SIGN_EXTEND
7250 && HWI_COMPUTABLE_MODE_P (mode)
7251 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7252 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7253 == 0))
7254 {
7255 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7256 rtx temp2 = expand_compound_operation (temp);
7257
7258 /* Make sure this is a profitable operation. */
7259 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7260 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7261 return temp2;
7262 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7263 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7264 return temp;
7265 else
7266 return x;
7267 }
7268
7269 /* We can optimize some special cases of ZERO_EXTEND. */
7270 if (GET_CODE (x) == ZERO_EXTEND)
7271 {
7272 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7273 know that the last value didn't have any inappropriate bits
7274 set.
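That is, if nonzero_bits says that no bits above the low 32 are set
in this example, the TRUNCATE/ZERO_EXTEND round trip changes nothing
and FOO can be used directly.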
*/ 7275 if (GET_CODE (XEXP (x, 0)) == TRUNCATE 7276 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode 7277 && HWI_COMPUTABLE_MODE_P (mode) 7278 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode) 7279 & ~GET_MODE_MASK (inner_mode)) == 0) 7280 return XEXP (XEXP (x, 0), 0); 7281 7282 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ 7283 if (GET_CODE (XEXP (x, 0)) == SUBREG 7284 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode 7285 && subreg_lowpart_p (XEXP (x, 0)) 7286 && HWI_COMPUTABLE_MODE_P (mode) 7287 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode) 7288 & ~GET_MODE_MASK (inner_mode)) == 0) 7289 return SUBREG_REG (XEXP (x, 0)); 7290 7291 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo 7292 is a comparison and STORE_FLAG_VALUE permits. This is like 7293 the first case, but it works even when MODE is larger 7294 than HOST_WIDE_INT. */ 7295 if (GET_CODE (XEXP (x, 0)) == TRUNCATE 7296 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode 7297 && COMPARISON_P (XEXP (XEXP (x, 0), 0)) 7298 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT 7299 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0) 7300 return XEXP (XEXP (x, 0), 0); 7301 7302 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ 7303 if (GET_CODE (XEXP (x, 0)) == SUBREG 7304 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode 7305 && subreg_lowpart_p (XEXP (x, 0)) 7306 && COMPARISON_P (SUBREG_REG (XEXP (x, 0))) 7307 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT 7308 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0) 7309 return SUBREG_REG (XEXP (x, 0)); 7310 7311 } 7312 7313 /* If we reach here, we want to return a pair of shifts. The inner 7314 shift is a left shift of BITSIZE - POS - LEN bits. The outer 7315 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or 7316 logical depending on the value of UNSIGNEDP. 7317 7318 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be 7319 converted into an AND of a shift. 7320 7321 We must check for the case where the left shift would have a negative 7322 count. This can happen in a case like (x >> 31) & 255 on machines 7323 that can't shift by a constant. On those machines, we would first 7324 combine the shift with the AND to produce a variable-position 7325 extraction. Then the constant of 31 would be substituted in 7326 to produce such a position. */ 7327 7328 modewidth = GET_MODE_PRECISION (mode); 7329 if (modewidth >= pos + len) 7330 { 7331 tem = gen_lowpart (mode, XEXP (x, 0)); 7332 if (!tem || GET_CODE (tem) == CLOBBER) 7333 return x; 7334 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode, 7335 tem, modewidth - pos - len); 7336 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT, 7337 mode, tem, modewidth - len); 7338 } 7339 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT) 7340 tem = simplify_and_const_int (NULL_RTX, mode, 7341 simplify_shift_const (NULL_RTX, LSHIFTRT, 7342 mode, XEXP (x, 0), 7343 pos), 7344 (HOST_WIDE_INT_1U << len) - 1); 7345 else 7346 /* Any other cases we can't handle. */ 7347 return x; 7348 7349 /* If we couldn't do this for some reason, return the original 7350 expression. */ 7351 if (GET_CODE (tem) == CLOBBER) 7352 return x; 7353 7354 return tem; 7355 } 7356 7357 /* X is a SET which contains an assignment of one object into 7358 a part of another (such as a bit-field assignment, STRICT_LOW_PART, 7359 or certain SUBREGS). If possible, convert it into a series of 7360 logical operations. 
7361
7362 We half-heartedly support variable positions, but do not at all
7363 support variable lengths. */
7364
7365 static const_rtx
7366 expand_field_assignment (const_rtx x)
7367 {
7368 rtx inner;
7369 rtx pos; /* Always counts from low bit. */
7370 int len, inner_len;
7371 rtx mask, cleared, masked;
7372 scalar_int_mode compute_mode;
7373
7374 /* Loop until we find something we can't simplify. */
7375 while (1)
7376 {
7377 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7378 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7379 {
7380 rtx x0 = XEXP (SET_DEST (x), 0);
7381 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7382 break;
7383 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7384 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7385 MAX_MODE_INT);
7386 }
7387 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7388 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7389 {
7390 inner = XEXP (SET_DEST (x), 0);
7391 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7392 break;
7393
7394 len = INTVAL (XEXP (SET_DEST (x), 1));
7395 pos = XEXP (SET_DEST (x), 2);
7396
7397 /* A constant position should stay within the width of INNER. */
7398 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7399 break;
7400
7401 if (BITS_BIG_ENDIAN)
7402 {
7403 if (CONST_INT_P (pos))
7404 pos = GEN_INT (inner_len - len - INTVAL (pos));
7405 else if (GET_CODE (pos) == MINUS
7406 && CONST_INT_P (XEXP (pos, 1))
7407 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7408 /* If position is ADJUST - X, new position is X. */
7409 pos = XEXP (pos, 0);
7410 else
7411 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7412 gen_int_mode (inner_len - len,
7413 GET_MODE (pos)),
7414 pos);
7415 }
7416 }
7417
7418 /* If the destination is a subreg that overwrites the whole of the inner
7419 register, we can move the subreg to the source. */
7420 else if (GET_CODE (SET_DEST (x)) == SUBREG
7421 /* We need SUBREGs to compute nonzero_bits properly. */
7422 && nonzero_sign_valid
7423 && !read_modify_subreg_p (SET_DEST (x)))
7424 {
7425 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7426 gen_lowpart
7427 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7428 SET_SRC (x)));
7429 continue;
7430 }
7431 else
7432 break;
7433
7434 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7435 inner = SUBREG_REG (inner);
7436
7437 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7438 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7439 {
7440 /* Don't do anything for vector or complex integral types. */
7441 if (! FLOAT_MODE_P (GET_MODE (inner)))
7442 break;
7443
7444 /* Try to find an integral mode to pun with. */
7445 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7446 .exists (&compute_mode))
7447 break;
7448
7449 inner = gen_lowpart (compute_mode, inner);
7450 }
7451
7452 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7453 if (len >= HOST_BITS_PER_WIDE_INT)
7454 break;
7455
7456 /* Don't try to compute in a mode that is too wide for the target to
support. */
7457 if (!targetm.scalar_mode_supported_p (compute_mode))
7458 break;
7459
7460 /* Now compute the equivalent expression. Make a copy of INNER
7461 for the SET_DEST in case it is a MEM into which we will substitute;
7462 we don't want shared RTL in that case.
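Roughly, with MASK = (1 << LEN) - 1, we build
(ior (and INNER (not (ashift MASK POS))) (ashift (and SRC MASK) POS));
e.g. storing an 8-bit field at bit 4 of R computes
R = (R & ~0xff0) | ((SRC & 0xff) << 4), where R is an arbitrary
destination.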
*/
7463 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7464 compute_mode);
7465 cleared = simplify_gen_binary (AND, compute_mode,
7466 simplify_gen_unary (NOT, compute_mode,
7467 simplify_gen_binary (ASHIFT,
7468 compute_mode,
7469 mask, pos),
7470 compute_mode),
7471 inner);
7472 masked = simplify_gen_binary (ASHIFT, compute_mode,
7473 simplify_gen_binary (
7474 AND, compute_mode,
7475 gen_lowpart (compute_mode, SET_SRC (x)),
7476 mask),
7477 pos);
7478
7479 x = gen_rtx_SET (copy_rtx (inner),
7480 simplify_gen_binary (IOR, compute_mode,
7481 cleared, masked));
7482 }
7483
7484 return x;
7485 }
7486
7487 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7488 it is an RTX that represents the (variable) starting position; otherwise,
7489 POS is the (constant) starting bit position. Both are counted from the LSB.
7490
7491 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7492
7493 IN_DEST is nonzero if this is a reference in the destination of a SET.
7494 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7495 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7496 be used.
7497
7498 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7499 ZERO_EXTRACT should be built even for bits starting at bit 0.
7500
7501 MODE is the desired mode of the result (if IN_DEST == 0).
7502
7503 The result is an RTX for the extraction or NULL_RTX if the target
7504 can't handle it. */
7505
7506 static rtx
7507 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7508 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7509 int in_dest, int in_compare)
7510 {
7511 /* This mode describes the size of the storage area
7512 to fetch the overall value from. Within that, we
7513 ignore the POS lowest bits, etc. */
7514 machine_mode is_mode = GET_MODE (inner);
7515 machine_mode inner_mode;
7516 scalar_int_mode wanted_inner_mode;
7517 scalar_int_mode wanted_inner_reg_mode = word_mode;
7518 scalar_int_mode pos_mode = word_mode;
7519 machine_mode extraction_mode = word_mode;
7520 rtx new_rtx = 0;
7521 rtx orig_pos_rtx = pos_rtx;
7522 HOST_WIDE_INT orig_pos;
7523
7524 if (pos_rtx && CONST_INT_P (pos_rtx))
7525 pos = INTVAL (pos_rtx), pos_rtx = 0;
7526
7527 if (GET_CODE (inner) == SUBREG
7528 && subreg_lowpart_p (inner)
7529 && (paradoxical_subreg_p (inner)
7530 /* If trying or potentially trying to extract
7531 bits outside of is_mode, don't look through
7532 non-paradoxical SUBREGs. See PR82192. */
7533 || (pos_rtx == NULL_RTX
7534 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7535 {
7536 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7537 consider just the QI as the memory to extract from.
7538 The subreg adds or removes high bits; its mode is
7539 irrelevant to the meaning of this extraction,
7540 since POS and LEN count from the lsb. */
7541 if (MEM_P (SUBREG_REG (inner)))
7542 is_mode = GET_MODE (SUBREG_REG (inner));
7543 inner = SUBREG_REG (inner);
7544 }
7545 else if (GET_CODE (inner) == ASHIFT
7546 && CONST_INT_P (XEXP (inner, 1))
7547 && pos_rtx == 0 && pos == 0
7548 && len > UINTVAL (XEXP (inner, 1)))
7549 {
7550 /* We're extracting the least significant bits of an rtx
7551 (ashift X (const_int C)), where LEN > C. Extract the
7552 least significant (LEN - C) bits of X, giving an rtx
7553 whose mode is MODE, then shift it left C times.
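For example, extracting the low 8 bits of (ashift X (const_int 3)) is
done by extracting the low 5 bits of X and shifting the result left
by 3, since the bottom 3 bits are known to be zero.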
*/
7554 new_rtx = make_extraction (mode, XEXP (inner, 0),
7555 0, 0, len - INTVAL (XEXP (inner, 1)),
7556 unsignedp, in_dest, in_compare);
7557 if (new_rtx != 0)
7558 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7559 }
7560 else if (GET_CODE (inner) == TRUNCATE
7561 /* If trying or potentially trying to extract
7562 bits outside of is_mode, don't look through
7563 TRUNCATE. See PR82192. */
7564 && pos_rtx == NULL_RTX
7565 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7566 inner = XEXP (inner, 0);
7567
7568 inner_mode = GET_MODE (inner);
7569
7570 /* See if this can be done without an extraction. We never can if the
7571 width of the field is not the same as that of some integer mode. For
7572 registers, we can only avoid the extraction if the position is at the
7573 low-order bit and this is either not in the destination or we have the
7574 appropriate STRICT_LOW_PART operation available.
7575
7576 For MEM, we can avoid an extract if the field starts on an appropriate
7577 boundary and we can change the mode of the memory reference. */
7578
7579 scalar_int_mode tmode;
7580 if (int_mode_for_size (len, 1).exists (&tmode)
7581 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7582 && !MEM_P (inner)
7583 && (pos == 0 || REG_P (inner))
7584 && (inner_mode == tmode
7585 || !REG_P (inner)
7586 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7587 || reg_truncated_to_mode (tmode, inner))
7588 && (! in_dest
7589 || (REG_P (inner)
7590 && have_insn_for (STRICT_LOW_PART, tmode))))
7591 || (MEM_P (inner) && pos_rtx == 0
7592 && (pos
7593 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7594 : BITS_PER_UNIT)) == 0
7595 /* We can't do this if we are widening INNER_MODE (it
7596 may not be aligned, for one thing). */
7597 && !paradoxical_subreg_p (tmode, inner_mode)
7598 && (inner_mode == tmode
7599 || (! mode_dependent_address_p (XEXP (inner, 0),
7600 MEM_ADDR_SPACE (inner))
7601 && ! MEM_VOLATILE_P (inner))))))
7602 {
7603 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7604 field. If the original and current mode are the same, we need not
7605 adjust the offset. Otherwise, we do so if bytes are big-endian.
7606
7607 If INNER is not a MEM, get a piece consisting of just the field
7608 of interest (in this case POS % BITS_PER_WORD must be 0). */
7609
7610 if (MEM_P (inner))
7611 {
7612 poly_int64 offset;
7613
7614 /* POS counts from lsb, but make OFFSET count in memory order. */
7615 if (BYTES_BIG_ENDIAN)
7616 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7617 - len - pos);
7618 else
7619 offset = pos / BITS_PER_UNIT;
7620
7621 new_rtx = adjust_address_nv (inner, tmode, offset);
7622 }
7623 else if (REG_P (inner))
7624 {
7625 if (tmode != inner_mode)
7626 {
7627 /* We can't call gen_lowpart in a DEST since we
7628 always want a SUBREG (see below) and it would sometimes
7629 return a new hard register. */
7630 if (pos || in_dest)
7631 {
7632 poly_uint64 offset
7633 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7634
7635 /* Avoid creating invalid subregs, for example when
7636 simplifying (x>>32)&255. */
7637 if (!validate_subreg (tmode, inner_mode, inner, offset))
7638 return NULL_RTX;
7639
7640 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7641 }
7642 else
7643 new_rtx = gen_lowpart (tmode, inner);
7644 }
7645 else
7646 new_rtx = inner;
7647 }
7648 else
7649 new_rtx = force_to_mode (inner, tmode,
7650 len >= HOST_BITS_PER_WIDE_INT
7651 ?
HOST_WIDE_INT_M1U
7652 : (HOST_WIDE_INT_1U << len) - 1, 0);
7653
7654 /* If this extraction is going into the destination of a SET,
7655 make a STRICT_LOW_PART unless we made a MEM. */
7656
7657 if (in_dest)
7658 return (MEM_P (new_rtx) ? new_rtx
7659 : (GET_CODE (new_rtx) != SUBREG
7660 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7661 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7662
7663 if (mode == tmode)
7664 return new_rtx;
7665
7666 if (CONST_SCALAR_INT_P (new_rtx))
7667 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7668 mode, new_rtx, tmode);
7669
7670 /* If we know that no extraneous bits are set, and that the high
7671 bit is not set, convert the extraction to the cheaper of
7672 sign and zero extension, which are equivalent in these cases. */
7673 if (flag_expensive_optimizations
7674 && (HWI_COMPUTABLE_MODE_P (tmode)
7675 && ((nonzero_bits (new_rtx, tmode)
7676 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7677 == 0)))
7678 {
7679 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7680 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7681
7682 /* Prefer ZERO_EXTEND, since it gives more information to
7683 backends. */
7684 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7685 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7686 return temp;
7687 return temp1;
7688 }
7689
7690 /* Otherwise, sign- or zero-extend unless we already are in the
7691 proper mode. */
7692
7693 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7694 mode, new_rtx));
7695 }
7696
7697 /* Unless this is a COMPARE or we have a funny memory reference,
7698 don't do anything with zero-extending field extracts starting at
7699 the low-order bit since they are simple AND operations. */
7700 if (pos_rtx == 0 && pos == 0 && ! in_dest
7701 && ! in_compare && unsignedp)
7702 return 0;
7703
7704 /* If INNER is a MEM, reject this if we would be spanning bytes or
7705 if the position is not a constant and the length is not 1. In all
7706 other cases, we would only be going outside our object in cases when
7707 an original shift would have been undefined. */
7708 if (MEM_P (inner)
7709 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7710 || (pos_rtx != 0 && len != 1)))
7711 return 0;
7712
7713 enum extraction_pattern pattern = (in_dest ? EP_insv
7714 : unsignedp ? EP_extzv : EP_extv);
7715
7716 /* If INNER is not from memory, we want it to have the mode of a register
7717 extraction pattern's structure operand, or word_mode if there is no
7718 such pattern. The same applies to extraction_mode and pos_mode
7719 and their respective operands.
7720
7721 For memory, assume that the desired extraction_mode and pos_mode
7722 are the same as for a register operation, since at present we don't
7723 have named patterns for aligned memory structures. */
7724 struct extraction_insn insn;
7725 unsigned int inner_size;
7726 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7727 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7728 {
7729 wanted_inner_reg_mode = insn.struct_mode.require ();
7730 pos_mode = insn.pos_mode;
7731 extraction_mode = insn.field_mode;
7732 }
7733
7734 /* Never narrow an object, since that might not be safe.
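That is, if the caller asked for a MODE wider than the pattern's
field mode, do the extraction in MODE instead.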
*/
7735
7736 if (mode != VOIDmode
7737 && partial_subreg_p (extraction_mode, mode))
7738 extraction_mode = mode;
7739
7740 if (!MEM_P (inner))
7741 wanted_inner_mode = wanted_inner_reg_mode;
7742 else
7743 {
7744 /* Be careful not to go beyond the extracted object and maintain the
7745 natural alignment of the memory. */
7746 wanted_inner_mode = smallest_int_mode_for_size (len);
7747 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7748 > GET_MODE_BITSIZE (wanted_inner_mode))
7749 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7750 }
7751
7752 orig_pos = pos;
7753
7754 if (BITS_BIG_ENDIAN)
7755 {
7756 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7757 BITS_BIG_ENDIAN style. If position is constant, compute new
7758 position. Otherwise, build subtraction.
7759 Note that POS is relative to the mode of the original argument.
7760 If it's a MEM we need to recompute POS relative to that.
7761 However, if we're extracting from (or inserting into) a register,
7762 we want to recompute POS relative to wanted_inner_mode. */
7763 int width;
7764 if (!MEM_P (inner))
7765 width = GET_MODE_BITSIZE (wanted_inner_mode);
7766 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7767 return NULL_RTX;
7768
7769 if (pos_rtx == 0)
7770 pos = width - len - pos;
7771 else
7772 pos_rtx
7773 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7774 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7775 pos_rtx);
7776 /* POS may be less than 0 now, but we check for that below.
7777 Note that it can only be less than 0 if !MEM_P (inner). */
7778 }
7779
7780 /* If INNER has a wider mode, and this is a constant extraction, try to
7781 make it smaller and adjust the byte to point to the byte containing
7782 the value. */
7783 if (wanted_inner_mode != VOIDmode
7784 && inner_mode != wanted_inner_mode
7785 && ! pos_rtx
7786 && partial_subreg_p (wanted_inner_mode, is_mode)
7787 && MEM_P (inner)
7788 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7789 && ! MEM_VOLATILE_P (inner))
7790 {
7791 poly_int64 offset = 0;
7792
7793 /* The computations below will be correct if the machine is big
7794 endian in both bits and bytes or little endian in bits and bytes.
7795 If it is mixed, we must adjust. */
7796
7797 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7798 adjust OFFSET to compensate. */
7799 if (BYTES_BIG_ENDIAN
7800 && paradoxical_subreg_p (is_mode, inner_mode))
7801 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7802
7803 /* We can now move to the desired byte. */
7804 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7805 * GET_MODE_SIZE (wanted_inner_mode);
7806 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7807
7808 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7809 && is_mode != wanted_inner_mode)
7810 offset = (GET_MODE_SIZE (is_mode)
7811 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7812
7813 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7814 }
7815
7816 /* If INNER is not memory, get it into the proper mode. If we are changing
7817 its mode, POS must be a constant and smaller than the size of the new
7818 mode. */
7819 else if (!MEM_P (inner))
7820 {
7821 /* On the LHS, don't create paradoxical subregs implicitly truncating
7822 the register unless TARGET_TRULY_NOOP_TRUNCATION.
*/
7823 if (in_dest
7824 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7825 wanted_inner_mode))
7826 return NULL_RTX;
7827
7828 if (GET_MODE (inner) != wanted_inner_mode
7829 && (pos_rtx != 0
7830 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7831 return NULL_RTX;
7832
7833 if (orig_pos < 0)
7834 return NULL_RTX;
7835
7836 inner = force_to_mode (inner, wanted_inner_mode,
7837 pos_rtx
7838 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7839 ? HOST_WIDE_INT_M1U
7840 : (((HOST_WIDE_INT_1U << len) - 1)
7841 << orig_pos),
7842 0);
7843 }
7844
7845 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7846 have to zero extend. Otherwise, we can just use a SUBREG.
7847
7848 We dealt with constant rtxes earlier, so pos_rtx cannot
7849 have VOIDmode at this point. */
7850 if (pos_rtx != 0
7851 && (GET_MODE_SIZE (pos_mode)
7852 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7853 {
7854 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7855 GET_MODE (pos_rtx));
7856
7857 /* If we know that no extraneous bits are set, and that the high
7858 bit is not set, convert the extraction to the cheaper one, either
7859 SIGN_EXTEND or ZERO_EXTEND, which are equivalent in these
7860 cases. */
7861 if (flag_expensive_optimizations
7862 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7863 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7864 & ~(((unsigned HOST_WIDE_INT)
7865 GET_MODE_MASK (GET_MODE (pos_rtx)))
7866 >> 1))
7867 == 0)))
7868 {
7869 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7870 GET_MODE (pos_rtx));
7871
7872 /* Prefer ZERO_EXTEND, since it gives more information to
7873 backends. */
7874 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7875 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7876 temp = temp1;
7877 }
7878 pos_rtx = temp;
7879 }
7880
7881 /* Make POS_RTX unless we already have it and it is correct. If we don't
7882 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7883 be a CONST_INT. */
7884 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7885 pos_rtx = orig_pos_rtx;
7886
7887 else if (pos_rtx == 0)
7888 pos_rtx = GEN_INT (pos);
7889
7890 /* Make the required operation. See if we can use existing rtx. */
7891 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7892 extraction_mode, inner, GEN_INT (len), pos_rtx);
7893 if (! in_dest)
7894 new_rtx = gen_lowpart (mode, new_rtx);
7895
7896 return new_rtx;
7897 }
7898
7899 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7900 can be commuted with any other operations in X. Return X without
7901 that shift if so. */
7902
7903 static rtx
7904 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7905 {
7906 enum rtx_code code = GET_CODE (x);
7907 rtx tem;
7908
7909 switch (code)
7910 {
7911 case ASHIFT:
7912 /* This is the shift itself. If it is wide enough, we will return
7913 either the value being shifted if the shift count is equal to
7914 COUNT or a shift for the difference.
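For example, with COUNT 3, (ashift X (const_int 5)) yields
(ashift X (const_int 2)), and (ashift X (const_int 3)) yields X itself.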
*/
7915 if (CONST_INT_P (XEXP (x, 1))
7916 && INTVAL (XEXP (x, 1)) >= count)
7917 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7918 INTVAL (XEXP (x, 1)) - count);
7919 break;
7920
7921 case NEG: case NOT:
7922 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7923 return simplify_gen_unary (code, mode, tem, mode);
7924
7925 break;
7926
7927 case PLUS: case IOR: case XOR: case AND:
7928 /* If we can safely shift this constant and we find the inner shift,
7929 make a new operation. */
7930 if (CONST_INT_P (XEXP (x, 1))
7931 && (UINTVAL (XEXP (x, 1))
7932 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7933 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7934 {
7935 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7936 return simplify_gen_binary (code, mode, tem,
7937 gen_int_mode (val, mode));
7938 }
7939 break;
7940
7941 default:
7942 break;
7943 }
7944
7945 return 0;
7946 }
7947
7948 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7949 level of the expression and MODE is its mode. IN_CODE is as for
7950 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7951 that should be used when recursing on operands of *X_PTR.
7952
7953 There are two possible actions:
7954
7955 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7956 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7957
7958 - Return a new rtx, which the caller returns directly. */
7959
7960 static rtx
7961 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7962 enum rtx_code in_code,
7963 enum rtx_code *next_code_ptr)
7964 {
7965 rtx x = *x_ptr;
7966 enum rtx_code next_code = *next_code_ptr;
7967 enum rtx_code code = GET_CODE (x);
7968 int mode_width = GET_MODE_PRECISION (mode);
7969 rtx rhs, lhs;
7970 rtx new_rtx = 0;
7971 int i;
7972 rtx tem;
7973 scalar_int_mode inner_mode;
7974 bool equality_comparison = false;
7975
7976 if (in_code == EQ)
7977 {
7978 equality_comparison = true;
7979 in_code = COMPARE;
7980 }
7981
7982 /* Process depending on the code of this operation. If NEW_RTX is set
7983 nonzero, it will be returned. */
7984
7985 switch (code)
7986 {
7987 case ASHIFT:
7988 /* Convert shifts by constants into multiplications if inside
7989 an address.
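For example, (ashift R (const_int 2)) within a MEM address becomes
(mult R (const_int 4)), the canonical form for scaled index
addressing; a NEG inside the shift is folded into a negative
multiplier.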
*/ 7990 if (in_code == MEM && CONST_INT_P (XEXP (x, 1)) 7991 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT 7992 && INTVAL (XEXP (x, 1)) >= 0) 7993 { 7994 HOST_WIDE_INT count = INTVAL (XEXP (x, 1)); 7995 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count; 7996 7997 new_rtx = make_compound_operation (XEXP (x, 0), next_code); 7998 if (GET_CODE (new_rtx) == NEG) 7999 { 8000 new_rtx = XEXP (new_rtx, 0); 8001 multval = -multval; 8002 } 8003 multval = trunc_int_for_mode (multval, mode); 8004 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode)); 8005 } 8006 break; 8007 8008 case PLUS: 8009 lhs = XEXP (x, 0); 8010 rhs = XEXP (x, 1); 8011 lhs = make_compound_operation (lhs, next_code); 8012 rhs = make_compound_operation (rhs, next_code); 8013 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG) 8014 { 8015 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0), 8016 XEXP (lhs, 1)); 8017 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem); 8018 } 8019 else if (GET_CODE (lhs) == MULT 8020 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0)) 8021 { 8022 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0), 8023 simplify_gen_unary (NEG, mode, 8024 XEXP (lhs, 1), 8025 mode)); 8026 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem); 8027 } 8028 else 8029 { 8030 SUBST (XEXP (x, 0), lhs); 8031 SUBST (XEXP (x, 1), rhs); 8032 } 8033 maybe_swap_commutative_operands (x); 8034 return x; 8035 8036 case MINUS: 8037 lhs = XEXP (x, 0); 8038 rhs = XEXP (x, 1); 8039 lhs = make_compound_operation (lhs, next_code); 8040 rhs = make_compound_operation (rhs, next_code); 8041 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG) 8042 { 8043 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0), 8044 XEXP (rhs, 1)); 8045 return simplify_gen_binary (PLUS, mode, tem, lhs); 8046 } 8047 else if (GET_CODE (rhs) == MULT 8048 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0)) 8049 { 8050 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0), 8051 simplify_gen_unary (NEG, mode, 8052 XEXP (rhs, 1), 8053 mode)); 8054 return simplify_gen_binary (PLUS, mode, tem, lhs); 8055 } 8056 else 8057 { 8058 SUBST (XEXP (x, 0), lhs); 8059 SUBST (XEXP (x, 1), rhs); 8060 return x; 8061 } 8062 8063 case AND: 8064 /* If the second operand is not a constant, we can't do anything 8065 with it. */ 8066 if (!CONST_INT_P (XEXP (x, 1))) 8067 break; 8068 8069 /* If the constant is a power of two minus one and the first operand 8070 is a logical right shift, make an extraction. */ 8071 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT 8072 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) 8073 { 8074 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); 8075 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), 8076 i, 1, 0, in_code == COMPARE); 8077 } 8078 8079 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */ 8080 else if (GET_CODE (XEXP (x, 0)) == SUBREG 8081 && subreg_lowpart_p (XEXP (x, 0)) 8082 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))), 8083 &inner_mode) 8084 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT 8085 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) 8086 { 8087 rtx inner_x0 = SUBREG_REG (XEXP (x, 0)); 8088 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code); 8089 new_rtx = make_extraction (inner_mode, new_rtx, 0, 8090 XEXP (inner_x0, 1), 8091 i, 1, 0, in_code == COMPARE); 8092 8093 /* If we narrowed the mode when dropping the subreg, then we lose. 
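The extraction would be in a mode narrower than the AND itself, so
discard it and fall back to simplifying the AND on its own below.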
*/
8094 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8095 new_rtx = NULL;
8096
8097 /* If that didn't give anything, see if the AND simplifies on
8098 its own. */
8099 if (!new_rtx && i >= 0)
8100 {
8101 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8102 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8103 0, in_code == COMPARE);
8104 }
8105 }
8106 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8107 else if ((GET_CODE (XEXP (x, 0)) == XOR
8108 || GET_CODE (XEXP (x, 0)) == IOR)
8109 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8110 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8111 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8112 {
8113 /* Apply the distributive law, and then try to make extractions. */
8114 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8115 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8116 XEXP (x, 1)),
8117 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8118 XEXP (x, 1)));
8119 new_rtx = make_compound_operation (new_rtx, in_code);
8120 }
8121
8122 /* If we have (and (rotate X C) M) and C is larger than the number
8123 of bits in M, this is an extraction. */
8124
8125 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8126 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8127 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8128 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8129 {
8130 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8131 new_rtx = make_extraction (mode, new_rtx,
8132 (GET_MODE_PRECISION (mode)
8133 - INTVAL (XEXP (XEXP (x, 0), 1))),
8134 NULL_RTX, i, 1, 0, in_code == COMPARE);
8135 }
8136
8137 /* On machines without logical shifts, if the operand of the AND is
8138 a logical shift and our mask turns off all the propagated sign
8139 bits, we can replace the logical shift with an arithmetic shift. */
8140 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8141 && !have_insn_for (LSHIFTRT, mode)
8142 && have_insn_for (ASHIFTRT, mode)
8143 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8144 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8145 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8146 && mode_width <= HOST_BITS_PER_WIDE_INT)
8147 {
8148 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8149
8150 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8151 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8152 SUBST (XEXP (x, 0),
8153 gen_rtx_ASHIFTRT (mode,
8154 make_compound_operation (XEXP (XEXP (x,
8155 0),
8156 0),
8157 next_code),
8158 XEXP (XEXP (x, 0), 1)));
8159 }
8160
8161 /* If the constant is one less than a power of two, this might be
8162 representable by an extraction even if no shift is present.
8163 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8164 we are in a COMPARE. */
8165 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8166 new_rtx = make_extraction (mode,
8167 make_compound_operation (XEXP (x, 0),
8168 next_code),
8169 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8170
8171 /* If we are in a comparison and this is an AND with a power of two,
8172 convert this into the appropriate bit extract.
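For example, comparing (and X (const_int 8)) against zero becomes
(zero_extract X (const_int 1) (const_int 3)), i.e. a direct test of
bit 3.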
*/
8173 else if (in_code == COMPARE
8174 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8175 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8176 new_rtx = make_extraction (mode,
8177 make_compound_operation (XEXP (x, 0),
8178 next_code),
8179 i, NULL_RTX, 1, 1, 0, 1);
8180
8181 /* If one operand is a paradoxical subreg of a register or memory and
8182 the constant (limited to the smaller mode) clears only bits that are
8183 already known to be zero in the subexpression, this can be expressed
8184 as a zero_extend. */
8185 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8186 {
8187 rtx sub;
8188
8189 sub = XEXP (XEXP (x, 0), 0);
8190 machine_mode sub_mode = GET_MODE (sub);
8191 int sub_width;
8192 if ((REG_P (sub) || MEM_P (sub))
8193 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8194 && sub_width < mode_width)
8195 {
8196 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8197 unsigned HOST_WIDE_INT mask;
8198
8199 /* original AND constant with all the known zero bits set */
8200 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8201 if ((mask & mode_mask) == mode_mask)
8202 {
8203 new_rtx = make_compound_operation (sub, next_code);
8204 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8205 1, 0, in_code == COMPARE);
8206 }
8207 }
8208 }
8209
8210 break;
8211
8212 case LSHIFTRT:
8213 /* If the sign bit is known to be zero, replace this with an
8214 arithmetic shift. */
8215 if (have_insn_for (ASHIFTRT, mode)
8216 && ! have_insn_for (LSHIFTRT, mode)
8217 && mode_width <= HOST_BITS_PER_WIDE_INT
8218 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8219 {
8220 new_rtx = gen_rtx_ASHIFTRT (mode,
8221 make_compound_operation (XEXP (x, 0),
8222 next_code),
8223 XEXP (x, 1));
8224 break;
8225 }
8226
8227 /* fall through */
8228
8229 case ASHIFTRT:
8230 lhs = XEXP (x, 0);
8231 rhs = XEXP (x, 1);
8232
8233 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8234 this is a SIGN_EXTRACT. */
8235 if (CONST_INT_P (rhs)
8236 && GET_CODE (lhs) == ASHIFT
8237 && CONST_INT_P (XEXP (lhs, 1))
8238 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8239 && INTVAL (XEXP (lhs, 1)) >= 0
8240 && INTVAL (rhs) < mode_width)
8241 {
8242 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8243 new_rtx = make_extraction (mode, new_rtx,
8244 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8245 NULL_RTX, mode_width - INTVAL (rhs),
8246 code == LSHIFTRT, 0, in_code == COMPARE);
8247 break;
8248 }
8249
8250 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8251 If so, try to merge the shifts into a SIGN_EXTEND. We could
8252 also do this for some cases of SIGN_EXTRACT, but it doesn't
8253 seem worth the effort; the case checked for occurs on Alpha. */
8254
8255 if (!OBJECT_P (lhs)
8256 && ! (GET_CODE (lhs) == SUBREG
8257 && (OBJECT_P (SUBREG_REG (lhs))))
8258 && CONST_INT_P (rhs)
8259 && INTVAL (rhs) >= 0
8260 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8261 && INTVAL (rhs) < mode_width
8262 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8263 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8264 next_code),
8265 0, NULL_RTX, mode_width - INTVAL (rhs),
8266 code == LSHIFTRT, 0, in_code == COMPARE);
8267
8268 break;
8269
8270 case SUBREG:
8271 /* Call ourselves recursively on the inner expression. If we are
8272 narrowing the object and it has a different RTL code from
8273 what it originally did, do this SUBREG as a force_to_mode.
*/ 8274 { 8275 rtx inner = SUBREG_REG (x), simplified; 8276 enum rtx_code subreg_code = in_code; 8277 8278 /* If the SUBREG is masking of a logical right shift, 8279 make an extraction. */ 8280 if (GET_CODE (inner) == LSHIFTRT 8281 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode) 8282 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode) 8283 && CONST_INT_P (XEXP (inner, 1)) 8284 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode) 8285 && subreg_lowpart_p (x)) 8286 { 8287 new_rtx = make_compound_operation (XEXP (inner, 0), next_code); 8288 int width = GET_MODE_PRECISION (inner_mode) 8289 - INTVAL (XEXP (inner, 1)); 8290 if (width > mode_width) 8291 width = mode_width; 8292 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1), 8293 width, 1, 0, in_code == COMPARE); 8294 break; 8295 } 8296 8297 /* If in_code is COMPARE, it isn't always safe to pass it through 8298 to the recursive make_compound_operation call. */ 8299 if (subreg_code == COMPARE 8300 && (!subreg_lowpart_p (x) 8301 || GET_CODE (inner) == SUBREG 8302 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0) 8303 is (const_int 0), rather than 8304 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). 8305 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0) 8306 for non-equality comparisons against 0 is not equivalent 8307 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */ 8308 || (GET_CODE (inner) == AND 8309 && CONST_INT_P (XEXP (inner, 1)) 8310 && partial_subreg_p (x) 8311 && exact_log2 (UINTVAL (XEXP (inner, 1))) 8312 >= GET_MODE_BITSIZE (mode) - 1))) 8313 subreg_code = SET; 8314 8315 tem = make_compound_operation (inner, subreg_code); 8316 8317 simplified 8318 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x)); 8319 if (simplified) 8320 tem = simplified; 8321 8322 if (GET_CODE (tem) != GET_CODE (inner) 8323 && partial_subreg_p (x) 8324 && subreg_lowpart_p (x)) 8325 { 8326 rtx newer 8327 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0); 8328 8329 /* If we have something other than a SUBREG, we might have 8330 done an expansion, so rerun ourselves. */ 8331 if (GET_CODE (newer) != SUBREG) 8332 newer = make_compound_operation (newer, in_code); 8333 8334 /* force_to_mode can expand compounds. If it just re-expanded 8335 the compound, use gen_lowpart to convert to the desired 8336 mode. */ 8337 if (rtx_equal_p (newer, x) 8338 /* Likewise if it re-expanded the compound only partially. 8339 This happens for SUBREG of ZERO_EXTRACT if they extract 8340 the same number of bits. */ 8341 || (GET_CODE (newer) == SUBREG 8342 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT 8343 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT) 8344 && GET_CODE (inner) == AND 8345 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0)))) 8346 return gen_lowpart (GET_MODE (x), tem); 8347 8348 return newer; 8349 } 8350 8351 if (simplified) 8352 return tem; 8353 } 8354 break; 8355 8356 default: 8357 break; 8358 } 8359 8360 if (new_rtx) 8361 *x_ptr = gen_lowpart (mode, new_rtx); 8362 *next_code_ptr = next_code; 8363 return NULL_RTX; 8364 } 8365 8366 /* Look at the expression rooted at X. Look for expressions 8367 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND. 8368 Form these expressions. 8369 8370 Return the new rtx, usually just X. 8371 8372 Also, for machines like the VAX that don't have logical shift insns, 8373 try to convert logical to arithmetic shift operations in cases where 8374 they are equivalent. This undoes the canonicalizations to logical 8375 shifts done elsewhere. 
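For example, (and (lshiftrt X (const_int 8)) (const_int 255)) becomes
(zero_extract X (const_int 8) (const_int 8)), and (ashift X (const_int 2))
inside an address becomes (mult X (const_int 4)).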
8376 8377 We try, as much as possible, to re-use rtl expressions to save memory. 8378 8379 IN_CODE says what kind of expression we are processing. Normally, it is 8380 SET. In a memory address it is MEM. When processing the arguments of 8381 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more 8382 precisely it is an equality comparison against zero. */ 8383 8384 rtx 8385 make_compound_operation (rtx x, enum rtx_code in_code) 8386 { 8387 enum rtx_code code = GET_CODE (x); 8388 const char *fmt; 8389 int i, j; 8390 enum rtx_code next_code; 8391 rtx new_rtx, tem; 8392 8393 /* Select the code to be used in recursive calls. Once we are inside an 8394 address, we stay there. If we have a comparison, set to COMPARE, 8395 but once inside, go back to our default of SET. */ 8396 8397 next_code = (code == MEM ? MEM 8398 : ((code == COMPARE || COMPARISON_P (x)) 8399 && XEXP (x, 1) == const0_rtx) ? COMPARE 8400 : in_code == COMPARE || in_code == EQ ? SET : in_code); 8401 8402 scalar_int_mode mode; 8403 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)) 8404 { 8405 rtx new_rtx = make_compound_operation_int (mode, &x, in_code, 8406 &next_code); 8407 if (new_rtx) 8408 return new_rtx; 8409 code = GET_CODE (x); 8410 } 8411 8412 /* Now recursively process each operand of this operation. We need to 8413 handle ZERO_EXTEND specially so that we don't lose track of the 8414 inner mode. */ 8415 if (code == ZERO_EXTEND) 8416 { 8417 new_rtx = make_compound_operation (XEXP (x, 0), next_code); 8418 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x), 8419 new_rtx, GET_MODE (XEXP (x, 0))); 8420 if (tem) 8421 return tem; 8422 SUBST (XEXP (x, 0), new_rtx); 8423 return x; 8424 } 8425 8426 fmt = GET_RTX_FORMAT (code); 8427 for (i = 0; i < GET_RTX_LENGTH (code); i++) 8428 if (fmt[i] == 'e') 8429 { 8430 new_rtx = make_compound_operation (XEXP (x, i), next_code); 8431 SUBST (XEXP (x, i), new_rtx); 8432 } 8433 else if (fmt[i] == 'E') 8434 for (j = 0; j < XVECLEN (x, i); j++) 8435 { 8436 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code); 8437 SUBST (XVECEXP (x, i, j), new_rtx); 8438 } 8439 8440 maybe_swap_commutative_operands (x); 8441 return x; 8442 } 8443 8444 /* Given M see if it is a value that would select a field of bits 8445 within an item, but not the entire word. Return -1 if not. 8446 Otherwise, return the starting position of the field, where 0 is the 8447 low-order bit. 8448 8449 *PLEN is set to the length of the field. */ 8450 8451 static int 8452 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen) 8453 { 8454 /* Get the bit number of the first 1 bit from the right, -1 if none. */ 8455 int pos = m ? ctz_hwi (m) : -1; 8456 int len = 0; 8457 8458 if (pos >= 0) 8459 /* Now shift off the low-order zero bits and see if we have a 8460 power of two minus 1. */ 8461 len = exact_log2 ((m >> pos) + 1); 8462 8463 if (len <= 0) 8464 pos = -1; 8465 8466 *plen = len; 8467 return pos; 8468 } 8469 8470 /* If X refers to a register that equals REG in value, replace these 8471 references with REG. 
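For example, if get_last_value shows that some pseudo appearing in X
currently holds exactly the value of REG, that pseudo is rewritten as
REG, so equal values end up naming a single register.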
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}

/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
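/* For instance (an illustrative sketch, not tied to any particular
   target): narrowing a DImode value X to SImode yields a plain
   (subreg:SI X 0) when SImode/DImode truncation is a no-op for the
   target, but an explicit (truncate:SI X) when
   TRULY_NOOP_TRUNCATION_MODES_P (SImode, DImode) is false and X is not
   already known to be truncated to SImode.  */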
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
	  || (mask
	      & GET_MODE_MASK (GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer too.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
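/* A small worked instance of the CONST_INT path above (the values are
   illustrative): with X = (const_int 0x1234), MODE = QImode and
   MASK = 0xff, the function returns gen_int_mode (0x1234 & 0xff, QImode),
   i.e. (const_int 0x34).  */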
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;
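      /* A concrete (illustrative) instance of the AND case: if only the
	 low four bits are live, i.e. MASK is 0x0f, then
	 (and:SI (reg:SI R) (const_int 0xff)) is first narrowed to
	 (and:SI (reg:SI R) (const_int 0x0f)); since that constant now
	 equals MASK, the AND is redundant and plain (reg:SI R) is kept.  */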
    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}
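      /* Illustration of the commutation above (hypothetical SImode RTL):
	 with MASK covering the low byte,
	   (ior:SI (lshiftrt:SI (reg:SI R) (const_int 8)) (const_int 0x12))
	 becomes
	   (lshiftrt:SI (ior:SI (reg:SI R) (const_int 0x1200)) (const_int 8))
	 which may later match a bitfield-extraction pattern.  */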
    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;
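      /* For example (an illustrative case): if MASK is 0xff and X is
	 (ashift:SI (reg:SI R) (const_int 4)), only bits 0-3 of R can
	 reach the live bits of the result, so the recursive call uses
	 MASK >> 4 == 0x0f for R.  */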
    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	{
	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				   gen_int_shift_amount (xmode, nbits));
	}
      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
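      /* Illustration of the val_signbit_p shortcut earlier in this case:
	 in SImode with MASK == 0x80000000, only the sign bit of the result
	 is consumed, and (ashiftrt:SI (reg:SI R) (const_int N)) has the
	 same sign bit as R for any N, so the shift is simply dropped.  */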
    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;
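      /* Illustration of the NOT commutation above (hypothetical SImode
	 RTL): with MASK == 0xff,
	   (and:SI (not:SI (lshiftrt:SI (reg:SI R) (const_int 8)))
		   (const_int 0xff))
	 is handled by rewriting the NOT as
	   (lshiftrt:SI (xor:SI (reg:SI R) (const_int 0xff00)) (const_int 8))
	 so that the mask can then be applied to the XOR operand directly.  */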
    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}

/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }
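  /* For instance (an illustrative call): if X is
     (ne:SI (reg:SI 100) (const_int 0)), the function returns (reg:SI 100)
     with *PTRUE = const_true_rtx and *PFALSE = const0_rtx.  */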
  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
	  && (REG_P (op0) || REG_P (op1)))
	{
	  /* Try to enable a simplification by undoing work done by
	     if_then_else_cond if it converted a REG into something more
	     complex.  */
	  if (REG_P (op0))
	    {
	      cond0 = 0;
	      true0 = false0 = op0;
	    }
	  else
	    {
	      cond1 = 0;
	      true1 = false1 = op1;
	    }
	}

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
					  &false0)) != 0)
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusion
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}

/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }
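  /* For example (an illustrative call): known_cond on (abs:SI (reg:SI 100))
     with COND == GE, REG == (reg:SI 100) and VAL == (const_int 0) returns
     plain (reg:SI 100), since the operand is known to be non-negative
     there.  */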
  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}

/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */
static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      x = adjust_address_nv (x, GET_MODE (y),
			     byte_lowpart_offset (GET_MODE (y),
						  GET_MODE (x)));
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other case, we'll import a register
     into this insn that might already have been dead.  So fail if none
     of the above cases are true.  */
  return 0;
}

/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }
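  /* A concrete (illustrative) instance of the clear-of-a-one-bit-field
     rule above:
       (set (reg:SI D)
	    (and:SI (rotate:SI (const_int -2) (reg:SI P)) (reg:SI D)))
     clears bit P of D and so is rewritten as
       (set (zero_extract:SI (reg:SI D) (const_int 1) (reg:SI P))
	    (const_int 0)).  */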
  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}

/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
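/* Two small (illustrative) instances of apply_distributive_law:

     (ior (and A C) (and B C))  ->  (and (ior A B) C)
     (xor (ior A C) (ior B C))  ->  (and (xor A B) (not C))

   the second using the XOR/IOR exception noted in the code above.  */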
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  Checking here would be pointless, since the few
   places that call this routine can do it themselves.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}

/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
	gen_lowpart
	  (mode,
	   apply_distributive_law
	     (simplify_gen_binary (GET_CODE (varop), varop_mode,
				   simplify_and_const_int (NULL_RTX,
							   varop_mode,
							   XEXP (varop, 0),
							   constop),
				   simplify_and_const_int (NULL_RTX,
							   varop_mode,
							   XEXP (varop, 1),
							   constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
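/* A worked (illustrative) instance of the NEG rule above: if VAROP is
   (neg:SI X) where X is known to be 0 or 1, then
   (and:SI (neg:SI X) (const_int 4)) yields 0 when X is 0 and 4 when X
   is 1, which is exactly (ashift:SI X (const_int 2)).  */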
*/ 10129 10130 static rtx 10131 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop, 10132 unsigned HOST_WIDE_INT constop) 10133 { 10134 rtx tem = simplify_and_const_int_1 (mode, varop, constop); 10135 if (tem) 10136 return tem; 10137 10138 if (!x) 10139 x = simplify_gen_binary (AND, GET_MODE (varop), varop, 10140 gen_int_mode (constop, mode)); 10141 if (GET_MODE (x) != mode) 10142 x = gen_lowpart (mode, x); 10143 return x; 10144 } 10145 10146 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero. 10147 We don't care about bits outside of those defined in MODE. 10148 10149 For most X this is simply GET_MODE_MASK (XMODE), but if X is 10150 a shift, AND, or zero_extract, we can do better. */ 10151 10152 static rtx 10153 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode, 10154 scalar_int_mode mode, 10155 unsigned HOST_WIDE_INT *nonzero) 10156 { 10157 rtx tem; 10158 reg_stat_type *rsp; 10159 10160 /* If X is a register whose nonzero bits value is current, use it. 10161 Otherwise, if X is a register whose value we can find, use that 10162 value. Otherwise, use the previously-computed global nonzero bits 10163 for this register. */ 10164 10165 rsp = &reg_stat[REGNO (x)]; 10166 if (rsp->last_set_value != 0 10167 && (rsp->last_set_mode == mode 10168 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT 10169 && GET_MODE_CLASS (mode) == MODE_INT)) 10170 && ((rsp->last_set_label >= label_tick_ebb_start 10171 && rsp->last_set_label < label_tick) 10172 || (rsp->last_set_label == label_tick 10173 && DF_INSN_LUID (rsp->last_set) < subst_low_luid) 10174 || (REGNO (x) >= FIRST_PSEUDO_REGISTER 10175 && REGNO (x) < reg_n_sets_max 10176 && REG_N_SETS (REGNO (x)) == 1 10177 && !REGNO_REG_SET_P 10178 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), 10179 REGNO (x))))) 10180 { 10181 /* Note that, even if the precision of last_set_mode is lower than that 10182 of mode, record_value_for_reg invoked nonzero_bits on the register 10183 with nonzero_bits_mode (because last_set_mode is necessarily integral 10184 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode 10185 are all valid, hence in mode too since nonzero_bits_mode is defined 10186 to the largest HWI_COMPUTABLE_MODE_P mode. */ 10187 *nonzero &= rsp->last_set_nonzero_bits; 10188 return NULL; 10189 } 10190 10191 tem = get_last_value (x); 10192 if (tem) 10193 { 10194 if (SHORT_IMMEDIATES_SIGN_EXTEND) 10195 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode)); 10196 10197 return tem; 10198 } 10199 10200 if (nonzero_sign_valid && rsp->nonzero_bits) 10201 { 10202 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits; 10203 10204 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode)) 10205 /* We don't know anything about the upper bits. */ 10206 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode); 10207 10208 *nonzero &= mask; 10209 } 10210 10211 return NULL; 10212 } 10213 10214 /* Given a reg X of mode XMODE, return the number of bits at the high-order 10215 end of X that are known to be equal to the sign bit. X will be used 10216 in mode MODE; the returned value will always be between 1 and the 10217 number of bits in MODE.
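For example (illustrative, assuming 32-bit SImode and 8-bit QImode): if X is known to hold (sign_extend:SI (reg:QI Y)), then its 25 high-order bits are all copies of the sign bit, so 25 would be the count recorded for MODE SImode.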
*/ 10218 10219 static rtx 10220 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode, 10221 scalar_int_mode mode, 10222 unsigned int *result) 10223 { 10224 rtx tem; 10225 reg_stat_type *rsp; 10226 10227 rsp = &reg_stat[REGNO (x)]; 10228 if (rsp->last_set_value != 0 10229 && rsp->last_set_mode == mode 10230 && ((rsp->last_set_label >= label_tick_ebb_start 10231 && rsp->last_set_label < label_tick) 10232 || (rsp->last_set_label == label_tick 10233 && DF_INSN_LUID (rsp->last_set) < subst_low_luid) 10234 || (REGNO (x) >= FIRST_PSEUDO_REGISTER 10235 && REGNO (x) < reg_n_sets_max 10236 && REG_N_SETS (REGNO (x)) == 1 10237 && !REGNO_REG_SET_P 10238 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), 10239 REGNO (x))))) 10240 { 10241 *result = rsp->last_set_sign_bit_copies; 10242 return NULL; 10243 } 10244 10245 tem = get_last_value (x); 10246 if (tem != 0) 10247 return tem; 10248 10249 if (nonzero_sign_valid && rsp->sign_bit_copies != 0 10250 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode)) 10251 *result = rsp->sign_bit_copies; 10252 10253 return NULL; 10254 } 10255 10256 /* Return the number of "extended" bits there are in X, when interpreted 10257 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For 10258 unsigned quantities, this is the number of high-order zero bits. 10259 For signed quantities, this is the number of copies of the sign bit 10260 minus 1. In both cases, this function returns the number of "spare" 10261 bits. For example, if two quantities for which this function returns 10262 at least 1 are added, the addition is known not to overflow. 10263 10264 This function will always return 0 unless called during combine, which 10265 implies that it must be called from a define_split. */ 10266 10267 unsigned int 10268 extended_count (const_rtx x, machine_mode mode, int unsignedp) 10269 { 10270 if (nonzero_sign_valid == 0) 10271 return 0; 10272 10273 scalar_int_mode int_mode; 10274 return (unsignedp 10275 ? (is_a <scalar_int_mode> (mode, &int_mode) 10276 && HWI_COMPUTABLE_MODE_P (int_mode) 10277 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1 10278 - floor_log2 (nonzero_bits (x, int_mode))) 10279 : 0) 10280 : num_sign_bit_copies (x, mode) - 1); 10281 } 10282 10283 /* This function is called from `simplify_shift_const' to merge two 10284 outer operations. Specifically, we have already found that we need 10285 to perform operation *POP0 with constant *PCONST0 at the outermost 10286 position. We would now like to also perform OP1 with constant CONST1 10287 (with *POP0 being done last). 10288 10289 Return 1 if we can do the operation and update *POP0 and *PCONST0 with 10290 the resulting operation. *PCOMP_P is set to 1 if we would need to 10291 complement the innermost operand, otherwise it is unchanged. 10292 10293 MODE is the mode in which the operation will be done. No bits outside 10294 the width of this mode matter. It is assumed that the width of this mode 10295 is smaller than or equal to HOST_BITS_PER_WIDE_INT. 10296 10297 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS, 10298 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper 10299 result is simply *PCONST0. 10300 10301 If the resulting operation cannot be expressed as one operation, we 10302 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.
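As an illustrative sketch (hypothetical values, not taken from any actual caller):

      enum rtx_code op = IOR;
      HOST_WIDE_INT c = 0x0f;
      int comp_p = 0;
      if (merge_outer_ops (&op, &c, IOR, 0x30, QImode, &comp_p))
        ...

   leaves op == IOR and c == 0x3f, since two IORs merge by OR-ing their
   constants; merging XOR with AND of the same constant instead yields
   AND with *PCOMP_P set, per the identity (a & b) ^ b == (~a) & b handled
   below.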
*/ 10303 10304 static int 10305 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p) 10306 { 10307 enum rtx_code op0 = *pop0; 10308 HOST_WIDE_INT const0 = *pconst0; 10309 10310 const0 &= GET_MODE_MASK (mode); 10311 const1 &= GET_MODE_MASK (mode); 10312 10313 /* If OP0 is an AND, clear unimportant bits in CONST1. */ 10314 if (op0 == AND) 10315 const1 &= const0; 10316 10317 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or 10318 if OP0 is SET. */ 10319 10320 if (op1 == UNKNOWN || op0 == SET) 10321 return 1; 10322 10323 else if (op0 == UNKNOWN) 10324 op0 = op1, const0 = const1; 10325 10326 else if (op0 == op1) 10327 { 10328 switch (op0) 10329 { 10330 case AND: 10331 const0 &= const1; 10332 break; 10333 case IOR: 10334 const0 |= const1; 10335 break; 10336 case XOR: 10337 const0 ^= const1; 10338 break; 10339 case PLUS: 10340 const0 += const1; 10341 break; 10342 case NEG: 10343 op0 = UNKNOWN; 10344 break; 10345 default: 10346 break; 10347 } 10348 } 10349 10350 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */ 10351 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG) 10352 return 0; 10353 10354 /* If the two constants aren't the same, we can't do anything. The 10355 remaining six cases can all be done. */ 10356 else if (const0 != const1) 10357 return 0; 10358 10359 else 10360 switch (op0) 10361 { 10362 case IOR: 10363 if (op1 == AND) 10364 /* (a & b) | b == b */ 10365 op0 = SET; 10366 else /* op1 == XOR */ 10367 /* (a ^ b) | b == a | b */ 10368 {;} 10369 break; 10370 10371 case XOR: 10372 if (op1 == AND) 10373 /* (a & b) ^ b == (~a) & b */ 10374 op0 = AND, *pcomp_p = 1; 10375 else /* op1 == IOR */ 10376 /* (a | b) ^ b == a & ~b */ 10377 op0 = AND, const0 = ~const0; 10378 break; 10379 10380 case AND: 10381 if (op1 == IOR) 10382 /* (a | b) & b == b */ 10383 op0 = SET; 10384 else /* op1 == XOR */ 10385 /* (a ^ b) & b == (~a) & b */ 10386 *pcomp_p = 1; 10387 break; 10388 default: 10389 break; 10390 } 10391 10392 /* Check for NO-OP cases. */ 10393 const0 &= GET_MODE_MASK (mode); 10394 if (const0 == 0 10395 && (op0 == IOR || op0 == XOR || op0 == PLUS)) 10396 op0 = UNKNOWN; 10397 else if (const0 == 0 && op0 == AND) 10398 op0 = SET; 10399 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode) 10400 && op0 == AND) 10401 op0 = UNKNOWN; 10402 10403 *pop0 = op0; 10404 10405 /* ??? Slightly redundant with the above mask, but not entirely. 10406 Moving this above means we'd have to sign-extend the mode mask 10407 for the final test. */ 10408 if (op0 != UNKNOWN && op0 != NEG) 10409 *pconst0 = trunc_int_for_mode (const0, mode); 10410 10411 return 1; 10412 } 10413 10414 /* A helper to simplify_shift_const_1 to determine the mode we can perform 10415 the shift in. The original shift operation CODE is performed on OP in 10416 ORIG_MODE. Return the wider mode MODE if we can perform the operation 10417 in that mode. Return ORIG_MODE otherwise. We can also assume that the 10418 result of the shift is subject to operation OUTER_CODE with operand 10419 OUTER_CONST. */ 10420 10421 static scalar_int_mode 10422 try_widen_shift_mode (enum rtx_code code, rtx op, int count, 10423 scalar_int_mode orig_mode, scalar_int_mode mode, 10424 enum rtx_code outer_code, HOST_WIDE_INT outer_const) 10425 { 10426 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode)); 10427 10428 /* In general we can't perform the shift in a wider mode for right shifts and rotates.
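For example (illustrative, assuming 32-bit SImode and 64-bit DImode), doing (lshiftrt:SI Y (const_int 4)) as a DImode shift would let bits that lie outside the SImode value flow into the high bits of the result unless they are known to be zero, and a rotate in the wider mode would wrap entirely different bits around; the cases below identify when widening is still safe.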
*/ 10429 switch (code) 10430 { 10431 case ASHIFTRT: 10432 /* We can still widen if the bits brought in from the left are identical 10433 to the sign bit of ORIG_MODE. */ 10434 if (num_sign_bit_copies (op, mode) 10435 > (unsigned) (GET_MODE_PRECISION (mode) 10436 - GET_MODE_PRECISION (orig_mode))) 10437 return mode; 10438 return orig_mode; 10439 10440 case LSHIFTRT: 10441 /* Similarly here but with zero bits. */ 10442 if (HWI_COMPUTABLE_MODE_P (mode) 10443 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0) 10444 return mode; 10445 10446 /* We can also widen if the bits brought in will be masked off. This 10447 operation is performed in ORIG_MODE. */ 10448 if (outer_code == AND) 10449 { 10450 int care_bits = low_bitmask_len (orig_mode, outer_const); 10451 10452 if (care_bits >= 0 10453 && GET_MODE_PRECISION (orig_mode) - care_bits >= count) 10454 return mode; 10455 } 10456 /* fall through */ 10457 10458 case ROTATE: 10459 return orig_mode; 10460 10461 case ROTATERT: 10462 gcc_unreachable (); 10463 10464 default: 10465 return mode; 10466 } 10467 } 10468 10469 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind 10470 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX 10471 if we cannot simplify it. Otherwise, return a simplified value. 10472 10473 The shift is normally computed in the widest mode we find in VAROP, as 10474 long as it isn't a different number of words than RESULT_MODE. Exceptions 10475 are ASHIFTRT and ROTATE, which are always done in their original mode. */ 10476 10477 static rtx 10478 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, 10479 rtx varop, int orig_count) 10480 { 10481 enum rtx_code orig_code = code; 10482 rtx orig_varop = varop; 10483 int count, log2; 10484 machine_mode mode = result_mode; 10485 machine_mode shift_mode; 10486 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode; 10487 /* We form (outer_op (code varop count) (outer_const)). */ 10488 enum rtx_code outer_op = UNKNOWN; 10489 HOST_WIDE_INT outer_const = 0; 10490 int complement_p = 0; 10491 rtx new_rtx, x; 10492 10493 /* Make sure and truncate the "natural" shift on the way in. We don't 10494 want to do this inside the loop as it makes it more difficult to 10495 combine shifts. */ 10496 if (SHIFT_COUNT_TRUNCATED) 10497 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1; 10498 10499 /* If we were given an invalid count, don't do anything except exactly 10500 what was requested. */ 10501 10502 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode)) 10503 return NULL_RTX; 10504 10505 count = orig_count; 10506 10507 /* Unless one of the branches of the `if' in this loop does a `continue', 10508 we will `break' the loop after the `if'. */ 10509 10510 while (count != 0) 10511 { 10512 /* If we have an operand of (clobber (const_int 0)), fail. */ 10513 if (GET_CODE (varop) == CLOBBER) 10514 return NULL_RTX; 10515 10516 /* Convert ROTATERT to ROTATE. */ 10517 if (code == ROTATERT) 10518 { 10519 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode); 10520 code = ROTATE; 10521 count = bitsize - count; 10522 } 10523 10524 shift_mode = result_mode; 10525 if (shift_mode != mode) 10526 { 10527 /* We only change the modes of scalar shifts. 
*/ 10528 int_mode = as_a <scalar_int_mode> (mode); 10529 int_result_mode = as_a <scalar_int_mode> (result_mode); 10530 shift_mode = try_widen_shift_mode (code, varop, count, 10531 int_result_mode, int_mode, 10532 outer_op, outer_const); 10533 } 10534 10535 scalar_int_mode shift_unit_mode 10536 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode)); 10537 10538 /* Handle cases where the count is greater than the size of the mode 10539 minus 1. For ASHIFT, use the size minus one as the count (this can 10540 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates, 10541 take the count modulo the size. For other shifts, the result is 10542 zero. 10543 10544 Since these shifts are being produced by the compiler by combining 10545 multiple operations, each of which are defined, we know what the 10546 result is supposed to be. */ 10547 10548 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1)) 10549 { 10550 if (code == ASHIFTRT) 10551 count = GET_MODE_PRECISION (shift_unit_mode) - 1; 10552 else if (code == ROTATE || code == ROTATERT) 10553 count %= GET_MODE_PRECISION (shift_unit_mode); 10554 else 10555 { 10556 /* We can't simply return zero because there may be an 10557 outer op. */ 10558 varop = const0_rtx; 10559 count = 0; 10560 break; 10561 } 10562 } 10563 10564 /* If we discovered we had to complement VAROP, leave. Making a NOT 10565 here would cause an infinite loop. */ 10566 if (complement_p) 10567 break; 10568 10569 if (shift_mode == shift_unit_mode) 10570 { 10571 /* An arithmetic right shift of a quantity known to be -1 or 0 10572 is a no-op. */ 10573 if (code == ASHIFTRT 10574 && (num_sign_bit_copies (varop, shift_unit_mode) 10575 == GET_MODE_PRECISION (shift_unit_mode))) 10576 { 10577 count = 0; 10578 break; 10579 } 10580 10581 /* If we are doing an arithmetic right shift and discarding all but 10582 the sign bit copies, this is equivalent to doing a shift by the 10583 bitsize minus one. Convert it into that shift because it will 10584 often allow other simplifications. */ 10585 10586 if (code == ASHIFTRT 10587 && (count + num_sign_bit_copies (varop, shift_unit_mode) 10588 >= GET_MODE_PRECISION (shift_unit_mode))) 10589 count = GET_MODE_PRECISION (shift_unit_mode) - 1; 10590 10591 /* We simplify the tests below and elsewhere by converting 10592 ASHIFTRT to LSHIFTRT if we know the sign bit is clear. 10593 `make_compound_operation' will convert it to an ASHIFTRT for 10594 those machines (such as VAX) that don't have an LSHIFTRT. */ 10595 if (code == ASHIFTRT 10596 && HWI_COMPUTABLE_MODE_P (shift_unit_mode) 10597 && val_signbit_known_clear_p (shift_unit_mode, 10598 nonzero_bits (varop, 10599 shift_unit_mode))) 10600 code = LSHIFTRT; 10601 10602 if (((code == LSHIFTRT 10603 && HWI_COMPUTABLE_MODE_P (shift_unit_mode) 10604 && !(nonzero_bits (varop, shift_unit_mode) >> count)) 10605 || (code == ASHIFT 10606 && HWI_COMPUTABLE_MODE_P (shift_unit_mode) 10607 && !((nonzero_bits (varop, shift_unit_mode) << count) 10608 & GET_MODE_MASK (shift_unit_mode)))) 10609 && !side_effects_p (varop)) 10610 varop = const0_rtx; 10611 } 10612 10613 switch (GET_CODE (varop)) 10614 { 10615 case SIGN_EXTEND: 10616 case ZERO_EXTEND: 10617 case SIGN_EXTRACT: 10618 case ZERO_EXTRACT: 10619 new_rtx = expand_compound_operation (varop); 10620 if (new_rtx != varop) 10621 { 10622 varop = new_rtx; 10623 continue; 10624 } 10625 break; 10626 10627 case MEM: 10628 /* The following rules apply only to scalars. 
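For instance (illustrative, little-endian, 32-bit SImode): (lshiftrt:SI (mem:SI A) (const_int 24)) can be rewritten as (zero_extend:SI (mem:QI A+3)), loading just the high-order byte.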
*/ 10629 if (shift_mode != shift_unit_mode) 10630 break; 10631 int_mode = as_a <scalar_int_mode> (mode); 10632 10633 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH 10634 minus the width of a smaller mode, we can do this with a 10635 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */ 10636 if ((code == ASHIFTRT || code == LSHIFTRT) 10637 && ! mode_dependent_address_p (XEXP (varop, 0), 10638 MEM_ADDR_SPACE (varop)) 10639 && ! MEM_VOLATILE_P (varop) 10640 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1) 10641 .exists (&tmode))) 10642 { 10643 new_rtx = adjust_address_nv (varop, tmode, 10644 BYTES_BIG_ENDIAN ? 0 10645 : count / BITS_PER_UNIT); 10646 10647 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND 10648 : ZERO_EXTEND, int_mode, new_rtx); 10649 count = 0; 10650 continue; 10651 } 10652 break; 10653 10654 case SUBREG: 10655 /* The following rules apply only to scalars. */ 10656 if (shift_mode != shift_unit_mode) 10657 break; 10658 int_mode = as_a <scalar_int_mode> (mode); 10659 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); 10660 10661 /* If VAROP is a SUBREG, strip it as long as the inner operand has 10662 the same number of words as what we've seen so far. Then store 10663 the widest mode in MODE. */ 10664 if (subreg_lowpart_p (varop) 10665 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode) 10666 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode) 10667 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD) 10668 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD)) 10669 && GET_MODE_CLASS (int_varop_mode) == MODE_INT) 10670 { 10671 varop = SUBREG_REG (varop); 10672 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode)) 10673 mode = inner_mode; 10674 continue; 10675 } 10676 break; 10677 10678 case MULT: 10679 /* Some machines use MULT instead of ASHIFT because MULT 10680 is cheaper. But it is still better on those machines to 10681 merge two shifts into one. */ 10682 if (CONST_INT_P (XEXP (varop, 1)) 10683 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0) 10684 { 10685 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2); 10686 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop), 10687 XEXP (varop, 0), log2_rtx); 10688 continue; 10689 } 10690 break; 10691 10692 case UDIV: 10693 /* Similar, for when divides are cheaper. */ 10694 if (CONST_INT_P (XEXP (varop, 1)) 10695 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0) 10696 { 10697 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2); 10698 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop), 10699 XEXP (varop, 0), log2_rtx); 10700 continue; 10701 } 10702 break; 10703 10704 case ASHIFTRT: 10705 /* If we are extracting just the sign bit of an arithmetic 10706 right shift, that shift is not needed. However, the sign 10707 bit of a wider mode may be different from what would be 10708 interpreted as the sign bit in a narrower mode, so, if 10709 the result is narrower, don't discard the shift. */ 10710 if (code == LSHIFTRT 10711 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1) 10712 && (GET_MODE_UNIT_BITSIZE (result_mode) 10713 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop)))) 10714 { 10715 varop = XEXP (varop, 0); 10716 continue; 10717 } 10718 10719 /* fall through */ 10720 10721 case LSHIFTRT: 10722 case ASHIFT: 10723 case ROTATE: 10724 /* The following rules apply only to scalars. 
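For instance, the nested-shift handling below turns (lshiftrt:SI (ashift:SI Y (const_int 2)) (const_int 3)) into (lshiftrt:SI Y (const_int 1)) under an AND mask recorded as an outer operation.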
*/ 10725 if (shift_mode != shift_unit_mode) 10726 break; 10727 int_mode = as_a <scalar_int_mode> (mode); 10728 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); 10729 int_result_mode = as_a <scalar_int_mode> (result_mode); 10730 10731 /* Here we have two nested shifts. The result is usually the 10732 AND of a new shift with a mask. We compute the result below. */ 10733 if (CONST_INT_P (XEXP (varop, 1)) 10734 && INTVAL (XEXP (varop, 1)) >= 0 10735 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode) 10736 && HWI_COMPUTABLE_MODE_P (int_result_mode) 10737 && HWI_COMPUTABLE_MODE_P (int_mode)) 10738 { 10739 enum rtx_code first_code = GET_CODE (varop); 10740 unsigned int first_count = INTVAL (XEXP (varop, 1)); 10741 unsigned HOST_WIDE_INT mask; 10742 rtx mask_rtx; 10743 10744 /* We have one common special case. We can't do any merging if 10745 the inner code is an ASHIFTRT of a smaller mode. However, if 10746 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2) 10747 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2), 10748 we can convert it to 10749 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1). 10750 This simplifies certain SIGN_EXTEND operations. */ 10751 if (code == ASHIFT && first_code == ASHIFTRT 10752 && count == (GET_MODE_PRECISION (int_result_mode) 10753 - GET_MODE_PRECISION (int_varop_mode))) 10754 { 10755 /* C3 has the low-order C1 bits zero. */ 10756 10757 mask = GET_MODE_MASK (int_mode) 10758 & ~((HOST_WIDE_INT_1U << first_count) - 1); 10759 10760 varop = simplify_and_const_int (NULL_RTX, int_result_mode, 10761 XEXP (varop, 0), mask); 10762 varop = simplify_shift_const (NULL_RTX, ASHIFT, 10763 int_result_mode, varop, count); 10764 count = first_count; 10765 code = ASHIFTRT; 10766 continue; 10767 } 10768 10769 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more 10770 than C1 high-order bits equal to the sign bit, we can convert 10771 this to either an ASHIFT or an ASHIFTRT depending on the 10772 two counts. 10773 10774 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */ 10775 10776 if (code == ASHIFTRT && first_code == ASHIFT 10777 && int_varop_mode == shift_unit_mode 10778 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode) 10779 > first_count)) 10780 { 10781 varop = XEXP (varop, 0); 10782 count -= first_count; 10783 if (count < 0) 10784 { 10785 count = -count; 10786 code = ASHIFT; 10787 } 10788 10789 continue; 10790 } 10791 10792 /* There are some cases we can't do. If CODE is ASHIFTRT, 10793 we can only do this if FIRST_CODE is also ASHIFTRT. 10794 10795 We can't do the case when CODE is ROTATE and FIRST_CODE is 10796 ASHIFTRT. 10797 10798 If the mode of this shift is not the mode of the outer shift, 10799 we can't do this if either shift is a right shift or ROTATE. 10800 10801 Finally, we can't do any of these if the mode is too wide 10802 unless the codes are the same. 10803 10804 Handle the case where the shift codes are the same 10805 first. 
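For example, (ashift:SI (ashift:SI Y (const_int 2)) (const_int 3)) simply becomes (ashift:SI Y (const_int 5)).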
*/ 10806 10807 if (code == first_code) 10808 { 10809 if (int_varop_mode != int_result_mode 10810 && (code == ASHIFTRT || code == LSHIFTRT 10811 || code == ROTATE)) 10812 break; 10813 10814 count += first_count; 10815 varop = XEXP (varop, 0); 10816 continue; 10817 } 10818 10819 if (code == ASHIFTRT 10820 || (code == ROTATE && first_code == ASHIFTRT) 10821 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT 10822 || (int_varop_mode != int_result_mode 10823 && (first_code == ASHIFTRT || first_code == LSHIFTRT 10824 || first_code == ROTATE 10825 || code == ROTATE))) 10826 break; 10827 10828 /* To compute the mask to apply after the shift, shift the 10829 nonzero bits of the inner shift the same way the 10830 outer shift will. */ 10831 10832 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode), 10833 int_result_mode); 10834 rtx count_rtx = gen_int_shift_amount (int_result_mode, count); 10835 mask_rtx 10836 = simplify_const_binary_operation (code, int_result_mode, 10837 mask_rtx, count_rtx); 10838 10839 /* Give up if we can't compute an outer operation to use. */ 10840 if (mask_rtx == 0 10841 || !CONST_INT_P (mask_rtx) 10842 || ! merge_outer_ops (&outer_op, &outer_const, AND, 10843 INTVAL (mask_rtx), 10844 int_result_mode, &complement_p)) 10845 break; 10846 10847 /* If the shifts are in the same direction, we add the 10848 counts. Otherwise, we subtract them. */ 10849 if ((code == ASHIFTRT || code == LSHIFTRT) 10850 == (first_code == ASHIFTRT || first_code == LSHIFTRT)) 10851 count += first_count; 10852 else 10853 count -= first_count; 10854 10855 /* If COUNT is positive, the new shift is usually CODE, 10856 except for the two exceptions below, in which case it is 10857 FIRST_CODE. If the count is negative, FIRST_CODE should 10858 always be used */ 10859 if (count > 0 10860 && ((first_code == ROTATE && code == ASHIFT) 10861 || (first_code == ASHIFTRT && code == LSHIFTRT))) 10862 code = first_code; 10863 else if (count < 0) 10864 code = first_code, count = -count; 10865 10866 varop = XEXP (varop, 0); 10867 continue; 10868 } 10869 10870 /* If we have (A << B << C) for any shift, we can convert this to 10871 (A << C << B). This wins if A is a constant. Only try this if 10872 B is not a constant. */ 10873 10874 else if (GET_CODE (varop) == code 10875 && CONST_INT_P (XEXP (varop, 0)) 10876 && !CONST_INT_P (XEXP (varop, 1))) 10877 { 10878 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make 10879 sure the result will be masked. See PR70222. */ 10880 if (code == LSHIFTRT 10881 && int_mode != int_result_mode 10882 && !merge_outer_ops (&outer_op, &outer_const, AND, 10883 GET_MODE_MASK (int_result_mode) 10884 >> orig_count, int_result_mode, 10885 &complement_p)) 10886 break; 10887 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing 10888 up outer sign extension (often left and right shift) is 10889 hardly more efficient than the original. See PR70429. */ 10890 if (code == ASHIFTRT && int_mode != int_result_mode) 10891 break; 10892 10893 rtx count_rtx = gen_int_shift_amount (int_result_mode, count); 10894 rtx new_rtx = simplify_const_binary_operation (code, int_mode, 10895 XEXP (varop, 0), 10896 count_rtx); 10897 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1)); 10898 count = 0; 10899 continue; 10900 } 10901 break; 10902 10903 case NOT: 10904 /* The following rules apply only to scalars. */ 10905 if (shift_mode != shift_unit_mode) 10906 break; 10907 10908 /* Make this fit the case below. 
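That is, rewrite (not Y) as (xor Y (const_int -1)) so that the IOR/AND/XOR handling below can move the operation outside the shift.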
*/ 10909 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx); 10910 continue; 10911 10912 case IOR: 10913 case AND: 10914 case XOR: 10915 /* The following rules apply only to scalars. */ 10916 if (shift_mode != shift_unit_mode) 10917 break; 10918 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); 10919 int_result_mode = as_a <scalar_int_mode> (result_mode); 10920 10921 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C) 10922 with C the size of VAROP - 1 and the shift is logical if 10923 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, 10924 we have an (le X 0) operation. If we have an arithmetic shift 10925 and STORE_FLAG_VALUE is 1 or we have a logical shift with 10926 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */ 10927 10928 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS 10929 && XEXP (XEXP (varop, 0), 1) == constm1_rtx 10930 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) 10931 && (code == LSHIFTRT || code == ASHIFTRT) 10932 && count == (GET_MODE_PRECISION (int_varop_mode) - 1) 10933 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) 10934 { 10935 count = 0; 10936 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1), 10937 const0_rtx); 10938 10939 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT) 10940 varop = gen_rtx_NEG (int_varop_mode, varop); 10941 10942 continue; 10943 } 10944 10945 /* If we have (shift (logical)), move the logical to the outside 10946 to allow it to possibly combine with another logical and the 10947 shift to combine with another shift. This also canonicalizes to 10948 what a ZERO_EXTRACT looks like. Also, some machines have 10949 (and (shift)) insns. */ 10950 10951 if (CONST_INT_P (XEXP (varop, 1)) 10952 /* We can't do this if we have (ashiftrt (xor)) and the 10953 constant has its sign bit set in shift_unit_mode with 10954 shift_unit_mode wider than result_mode. */ 10955 && !(code == ASHIFTRT && GET_CODE (varop) == XOR 10956 && int_result_mode != shift_unit_mode 10957 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)), 10958 shift_unit_mode) < 0) 10959 && (new_rtx = simplify_const_binary_operation 10960 (code, int_result_mode, 10961 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), 10962 gen_int_shift_amount (int_result_mode, count))) != 0 10963 && CONST_INT_P (new_rtx) 10964 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop), 10965 INTVAL (new_rtx), int_result_mode, 10966 &complement_p)) 10967 { 10968 varop = XEXP (varop, 0); 10969 continue; 10970 } 10971 10972 /* If we can't do that, try to simplify the shift in each arm of the 10973 logical expression, make a new logical expression, and apply 10974 the inverse distributive law. This also can't be done for 10975 (ashiftrt (xor)) where we've widened the shift and the constant 10976 changes the sign bit. 
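For example, (lshiftrt (ior A B) C) becomes (ior (lshiftrt A C) (lshiftrt B C)), after which apply_distributive_law may fold the result further.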
*/ 10977 if (CONST_INT_P (XEXP (varop, 1)) 10978 && !(code == ASHIFTRT && GET_CODE (varop) == XOR 10979 && int_result_mode != shift_unit_mode 10980 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)), 10981 shift_unit_mode) < 0)) 10982 { 10983 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode, 10984 XEXP (varop, 0), count); 10985 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode, 10986 XEXP (varop, 1), count); 10987 10988 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode, 10989 lhs, rhs); 10990 varop = apply_distributive_law (varop); 10991 10992 count = 0; 10993 continue; 10994 } 10995 break; 10996 10997 case EQ: 10998 /* The following rules apply only to scalars. */ 10999 if (shift_mode != shift_unit_mode) 11000 break; 11001 int_result_mode = as_a <scalar_int_mode> (result_mode); 11002 11003 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE 11004 says that the sign bit can be tested, FOO has mode MODE, C is 11005 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit 11006 that may be nonzero. */ 11007 if (code == LSHIFTRT 11008 && XEXP (varop, 1) == const0_rtx 11009 && GET_MODE (XEXP (varop, 0)) == int_result_mode 11010 && count == (GET_MODE_PRECISION (int_result_mode) - 1) 11011 && HWI_COMPUTABLE_MODE_P (int_result_mode) 11012 && STORE_FLAG_VALUE == -1 11013 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1 11014 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, 11015 int_result_mode, &complement_p)) 11016 { 11017 varop = XEXP (varop, 0); 11018 count = 0; 11019 continue; 11020 } 11021 break; 11022 11023 case NEG: 11024 /* The following rules apply only to scalars. */ 11025 if (shift_mode != shift_unit_mode) 11026 break; 11027 int_result_mode = as_a <scalar_int_mode> (result_mode); 11028 11029 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less 11030 than the number of bits in the mode is equivalent to A. */ 11031 if (code == LSHIFTRT 11032 && count == (GET_MODE_PRECISION (int_result_mode) - 1) 11033 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1) 11034 { 11035 varop = XEXP (varop, 0); 11036 count = 0; 11037 continue; 11038 } 11039 11040 /* NEG commutes with ASHIFT since it is multiplication. Move the 11041 NEG outside to allow shifts to combine. */ 11042 if (code == ASHIFT 11043 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, 11044 int_result_mode, &complement_p)) 11045 { 11046 varop = XEXP (varop, 0); 11047 continue; 11048 } 11049 break; 11050 11051 case PLUS: 11052 /* The following rules apply only to scalars. */ 11053 if (shift_mode != shift_unit_mode) 11054 break; 11055 int_result_mode = as_a <scalar_int_mode> (result_mode); 11056 11057 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C 11058 is one less than the number of bits in the mode is 11059 equivalent to (xor A 1). */ 11060 if (code == LSHIFTRT 11061 && count == (GET_MODE_PRECISION (int_result_mode) - 1) 11062 && XEXP (varop, 1) == constm1_rtx 11063 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1 11064 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, 11065 int_result_mode, &complement_p)) 11066 { 11067 count = 0; 11068 varop = XEXP (varop, 0); 11069 continue; 11070 } 11071 11072 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits 11073 that might be nonzero in BAR are those being shifted out and those 11074 bits are known zero in FOO, we can replace the PLUS with FOO. 11075 Similarly in the other operand order. This code occurs when 11076 we are computing the size of a variable-size array. 
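For example (illustrative): if Y is known to be a multiple of 8, then in (lshiftrt:SI (plus:SI Y (const_int 7)) (const_int 3)) the 7 is confined to the three bits that are shifted out and cannot carry into Y's nonzero bits, so the expression simplifies to (lshiftrt:SI Y (const_int 3)).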
*/ 11077 11078 if ((code == ASHIFTRT || code == LSHIFTRT) 11079 && count < HOST_BITS_PER_WIDE_INT 11080 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0 11081 && (nonzero_bits (XEXP (varop, 1), int_result_mode) 11082 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0) 11083 { 11084 varop = XEXP (varop, 0); 11085 continue; 11086 } 11087 else if ((code == ASHIFTRT || code == LSHIFTRT) 11088 && count < HOST_BITS_PER_WIDE_INT 11089 && HWI_COMPUTABLE_MODE_P (int_result_mode) 11090 && (nonzero_bits (XEXP (varop, 0), int_result_mode) 11091 >> count) == 0 11092 && (nonzero_bits (XEXP (varop, 0), int_result_mode) 11093 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0) 11094 { 11095 varop = XEXP (varop, 1); 11096 continue; 11097 } 11098 11099 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */ 11100 if (code == ASHIFT 11101 && CONST_INT_P (XEXP (varop, 1)) 11102 && (new_rtx = simplify_const_binary_operation 11103 (ASHIFT, int_result_mode, 11104 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), 11105 gen_int_shift_amount (int_result_mode, count))) != 0 11106 && CONST_INT_P (new_rtx) 11107 && merge_outer_ops (&outer_op, &outer_const, PLUS, 11108 INTVAL (new_rtx), int_result_mode, 11109 &complement_p)) 11110 { 11111 varop = XEXP (varop, 0); 11112 continue; 11113 } 11114 11115 /* Check for 'PLUS signbit', which is the canonical form of 'XOR 11116 signbit', and attempt to change the PLUS to an XOR and move it to 11117 the outer operation as is done above in the AND/IOR/XOR case 11118 leg for shift(logical). See details in the logical handling above 11119 for the reasoning behind doing so. */ 11120 if (code == LSHIFTRT 11121 && CONST_INT_P (XEXP (varop, 1)) 11122 && mode_signbit_p (int_result_mode, XEXP (varop, 1)) 11123 && (new_rtx = simplify_const_binary_operation 11124 (code, int_result_mode, 11125 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), 11126 gen_int_shift_amount (int_result_mode, count))) != 0 11127 && CONST_INT_P (new_rtx) 11128 && merge_outer_ops (&outer_op, &outer_const, XOR, 11129 INTVAL (new_rtx), int_result_mode, 11130 &complement_p)) 11131 { 11132 varop = XEXP (varop, 0); 11133 continue; 11134 } 11135 11136 break; 11137 11138 case MINUS: 11139 /* The following rules apply only to scalars. */ 11140 if (shift_mode != shift_unit_mode) 11141 break; 11142 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); 11143 11144 /* If we have (xshiftrt (minus (ashiftrt X C) X) C) 11145 with C the size of VAROP - 1 and the shift is logical if 11146 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, 11147 we have a (gt X 0) operation. If the shift is arithmetic with 11148 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1, 11149 we have a (neg (gt X 0)) operation. */ 11150 11151 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) 11152 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT 11153 && count == (GET_MODE_PRECISION (int_varop_mode) - 1) 11154 && (code == LSHIFTRT || code == ASHIFTRT) 11155 && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) 11156 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count 11157 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) 11158 { 11159 count = 0; 11160 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1), 11161 const0_rtx); 11162 11163 if (STORE_FLAG_VALUE == 1 ?
code == ASHIFTRT : code == LSHIFTRT) 11164 varop = gen_rtx_NEG (int_varop_mode, varop); 11165 11166 continue; 11167 } 11168 break; 11169 11170 case TRUNCATE: 11171 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt)) 11172 if the truncate does not affect the value. */ 11173 if (code == LSHIFTRT 11174 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT 11175 && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) 11176 && (INTVAL (XEXP (XEXP (varop, 0), 1)) 11177 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0))) 11178 - GET_MODE_UNIT_PRECISION (GET_MODE (varop))))) 11179 { 11180 rtx varop_inner = XEXP (varop, 0); 11181 int new_count = count + INTVAL (XEXP (varop_inner, 1)); 11182 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner), 11183 new_count); 11184 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner), 11185 XEXP (varop_inner, 0), 11186 new_count_rtx); 11187 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner); 11188 count = 0; 11189 continue; 11190 } 11191 break; 11192 11193 default: 11194 break; 11195 } 11196 11197 break; 11198 } 11199 11200 shift_mode = result_mode; 11201 if (shift_mode != mode) 11202 { 11203 /* We only change the modes of scalar shifts. */ 11204 int_mode = as_a <scalar_int_mode> (mode); 11205 int_result_mode = as_a <scalar_int_mode> (result_mode); 11206 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode, 11207 int_mode, outer_op, outer_const); 11208 } 11209 11210 /* We have now finished analyzing the shift. The result should be 11211 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If 11212 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied 11213 to the result of the shift. OUTER_CONST is the relevant constant, 11214 but we must turn off all bits turned off in the shift. */ 11215 11216 if (outer_op == UNKNOWN 11217 && orig_code == code && orig_count == count 11218 && varop == orig_varop 11219 && shift_mode == GET_MODE (varop)) 11220 return NULL_RTX; 11221 11222 /* Make a SUBREG if necessary. If we can't make it, fail. */ 11223 varop = gen_lowpart (shift_mode, varop); 11224 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER) 11225 return NULL_RTX; 11226 11227 /* If we have an outer operation and we just made a shift, it is 11228 possible that we could have simplified the shift were it not 11229 for the outer operation. So try to do the simplification 11230 recursively. */ 11231 11232 if (outer_op != UNKNOWN) 11233 x = simplify_shift_const_1 (code, shift_mode, varop, count); 11234 else 11235 x = NULL_RTX; 11236 11237 if (x == NULL_RTX) 11238 x = simplify_gen_binary (code, shift_mode, varop, 11239 gen_int_shift_amount (shift_mode, count)); 11240 11241 /* If we were doing an LSHIFTRT in a wider mode than it was originally, 11242 turn off all the bits that the shift would have turned off. */ 11243 if (orig_code == LSHIFTRT && result_mode != shift_mode) 11244 /* We only change the modes of scalar shifts. */ 11245 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode), 11246 x, GET_MODE_MASK (result_mode) >> orig_count); 11247 11248 /* Do the remainder of the processing in RESULT_MODE. */ 11249 x = gen_lowpart_or_truncate (result_mode, x); 11250 11251 /* If COMPLEMENT_P is set, we have to complement X before doing the outer 11252 operation. 
*/ 11253 if (complement_p) 11254 x = simplify_gen_unary (NOT, result_mode, x, result_mode); 11255 11256 if (outer_op != UNKNOWN) 11257 { 11258 int_result_mode = as_a <scalar_int_mode> (result_mode); 11259 11260 if (GET_RTX_CLASS (outer_op) != RTX_UNARY 11261 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT) 11262 outer_const = trunc_int_for_mode (outer_const, int_result_mode); 11263 11264 if (outer_op == AND) 11265 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const); 11266 else if (outer_op == SET) 11267 { 11268 /* This means that we have determined that the result is 11269 equivalent to a constant. This should be rare. */ 11270 if (!side_effects_p (x)) 11271 x = GEN_INT (outer_const); 11272 } 11273 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY) 11274 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode); 11275 else 11276 x = simplify_gen_binary (outer_op, int_result_mode, x, 11277 GEN_INT (outer_const)); 11278 } 11279 11280 return x; 11281 } 11282 11283 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift. 11284 The result of the shift is RESULT_MODE. If we cannot simplify it, 11285 return X or, if it is NULL, synthesize the expression with 11286 simplify_gen_binary. Otherwise, return a simplified value. 11287 11288 The shift is normally computed in the widest mode we find in VAROP, as 11289 long as it isn't a different number of words than RESULT_MODE. Exceptions 11290 are ASHIFTRT and ROTATE, which are always done in their original mode. */ 11291 11292 static rtx 11293 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode, 11294 rtx varop, int count) 11295 { 11296 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count); 11297 if (tem) 11298 return tem; 11299 11300 if (!x) 11301 x = simplify_gen_binary (code, GET_MODE (varop), varop, 11302 gen_int_shift_amount (GET_MODE (varop), count)); 11303 if (GET_MODE (x) != result_mode) 11304 x = gen_lowpart (result_mode, x); 11305 return x; 11306 } 11307 11308 11309 /* A subroutine of recog_for_combine. See there for arguments and 11310 return value. */ 11311 11312 static int 11313 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes) 11314 { 11315 rtx pat = *pnewpat; 11316 rtx pat_without_clobbers; 11317 int insn_code_number; 11318 int num_clobbers_to_add = 0; 11319 int i; 11320 rtx notes = NULL_RTX; 11321 rtx old_notes, old_pat; 11322 int old_icode; 11323 11324 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER 11325 we use to indicate that something didn't match. If we find such a 11326 thing, force rejection. */ 11327 if (GET_CODE (pat) == PARALLEL) 11328 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) 11329 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER 11330 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx) 11331 return -1; 11332 11333 old_pat = PATTERN (insn); 11334 old_notes = REG_NOTES (insn); 11335 PATTERN (insn) = pat; 11336 REG_NOTES (insn) = NULL_RTX; 11337 11338 insn_code_number = recog (pat, insn, &num_clobbers_to_add); 11339 if (dump_file && (dump_flags & TDF_DETAILS)) 11340 { 11341 if (insn_code_number < 0) 11342 fputs ("Failed to match this instruction:\n", dump_file); 11343 else 11344 fputs ("Successfully matched this instruction:\n", dump_file); 11345 print_rtl_single (dump_file, pat); 11346 } 11347 11348 /* If it isn't, there is the possibility that we previously had an insn 11349 that clobbered some register as a side effect, but the combined 11350 insn doesn't need to do that. 
So try once more without the clobbers 11351 unless this represents an ASM insn. */ 11352 11353 if (insn_code_number < 0 && ! check_asm_operands (pat) 11354 && GET_CODE (pat) == PARALLEL) 11355 { 11356 int pos; 11357 11358 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++) 11359 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER) 11360 { 11361 if (i != pos) 11362 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i)); 11363 pos++; 11364 } 11365 11366 SUBST_INT (XVECLEN (pat, 0), pos); 11367 11368 if (pos == 1) 11369 pat = XVECEXP (pat, 0, 0); 11370 11371 PATTERN (insn) = pat; 11372 insn_code_number = recog (pat, insn, &num_clobbers_to_add); 11373 if (dump_file && (dump_flags & TDF_DETAILS)) 11374 { 11375 if (insn_code_number < 0) 11376 fputs ("Failed to match this instruction:\n", dump_file); 11377 else 11378 fputs ("Successfully matched this instruction:\n", dump_file); 11379 print_rtl_single (dump_file, pat); 11380 } 11381 } 11382 11383 pat_without_clobbers = pat; 11384 11385 PATTERN (insn) = old_pat; 11386 REG_NOTES (insn) = old_notes; 11387 11388 /* Recognize all noop sets; these will be killed by a followup pass. */ 11389 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat)) 11390 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0; 11391 11392 /* If we had any clobbers to add, make a new pattern that contains 11393 them. Then check to make sure that all of them are dead. */ 11394 if (num_clobbers_to_add) 11395 { 11396 rtx newpat = gen_rtx_PARALLEL (VOIDmode, 11397 rtvec_alloc (GET_CODE (pat) == PARALLEL 11398 ? (XVECLEN (pat, 0) 11399 + num_clobbers_to_add) 11400 : num_clobbers_to_add + 1)); 11401 11402 if (GET_CODE (pat) == PARALLEL) 11403 for (i = 0; i < XVECLEN (pat, 0); i++) 11404 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i); 11405 else 11406 XVECEXP (newpat, 0, 0) = pat; 11407 11408 add_clobbers (newpat, insn_code_number); 11409 11410 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add; 11411 i < XVECLEN (newpat, 0); i++) 11412 { 11413 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)) 11414 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn)) 11415 return -1; 11416 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH) 11417 { 11418 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))); 11419 notes = alloc_reg_note (REG_UNUSED, 11420 XEXP (XVECEXP (newpat, 0, i), 0), notes); 11421 } 11422 } 11423 pat = newpat; 11424 } 11425 11426 if (insn_code_number >= 0 11427 && insn_code_number != NOOP_MOVE_INSN_CODE) 11428 { 11429 old_pat = PATTERN (insn); 11430 old_notes = REG_NOTES (insn); 11431 old_icode = INSN_CODE (insn); 11432 PATTERN (insn) = pat; 11433 REG_NOTES (insn) = notes; 11434 INSN_CODE (insn) = insn_code_number; 11435 11436 /* Allow targets to reject combined insn. */ 11437 if (!targetm.legitimate_combined_insn (insn)) 11438 { 11439 if (dump_file && (dump_flags & TDF_DETAILS)) 11440 fputs ("Instruction not appropriate for target.", 11441 dump_file); 11442 11443 /* Callers expect recog_for_combine to strip 11444 clobbers from the pattern on failure. */ 11445 pat = pat_without_clobbers; 11446 notes = NULL_RTX; 11447 11448 insn_code_number = -1; 11449 } 11450 11451 PATTERN (insn) = old_pat; 11452 REG_NOTES (insn) = old_notes; 11453 INSN_CODE (insn) = old_icode; 11454 } 11455 11456 *pnewpat = pat; 11457 *pnotes = notes; 11458 11459 return insn_code_number; 11460 } 11461 11462 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be 11463 expressed as an AND and maybe an LSHIFTRT, to that formulation. 11464 Return whether anything was so changed.
*/ 11465 11466 static bool 11467 change_zero_ext (rtx pat) 11468 { 11469 bool changed = false; 11470 rtx *src = &SET_SRC (pat); 11471 11472 subrtx_ptr_iterator::array_type array; 11473 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST) 11474 { 11475 rtx x = **iter; 11476 scalar_int_mode mode, inner_mode; 11477 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode)) 11478 continue; 11479 int size; 11480 11481 if (GET_CODE (x) == ZERO_EXTRACT 11482 && CONST_INT_P (XEXP (x, 1)) 11483 && CONST_INT_P (XEXP (x, 2)) 11484 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode) 11485 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode)) 11486 { 11487 size = INTVAL (XEXP (x, 1)); 11488 11489 int start = INTVAL (XEXP (x, 2)); 11490 if (BITS_BIG_ENDIAN) 11491 start = GET_MODE_PRECISION (inner_mode) - size - start; 11492 11493 if (start != 0) 11494 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), 11495 gen_int_shift_amount (inner_mode, start)); 11496 else 11497 x = XEXP (x, 0); 11498 11499 if (mode != inner_mode) 11500 { 11501 if (REG_P (x) && HARD_REGISTER_P (x) 11502 && !can_change_dest_mode (x, 0, mode)) 11503 continue; 11504 11505 x = gen_lowpart_SUBREG (mode, x); 11506 } 11507 } 11508 else if (GET_CODE (x) == ZERO_EXTEND 11509 && GET_CODE (XEXP (x, 0)) == SUBREG 11510 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0)))) 11511 && !paradoxical_subreg_p (XEXP (x, 0)) 11512 && subreg_lowpart_p (XEXP (x, 0))) 11513 { 11514 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0))); 11515 size = GET_MODE_PRECISION (inner_mode); 11516 x = SUBREG_REG (XEXP (x, 0)); 11517 if (GET_MODE (x) != mode) 11518 { 11519 if (REG_P (x) && HARD_REGISTER_P (x) 11520 && !can_change_dest_mode (x, 0, mode)) 11521 continue; 11522 11523 x = gen_lowpart_SUBREG (mode, x); 11524 } 11525 } 11526 else if (GET_CODE (x) == ZERO_EXTEND 11527 && REG_P (XEXP (x, 0)) 11528 && HARD_REGISTER_P (XEXP (x, 0)) 11529 && can_change_dest_mode (XEXP (x, 0), 0, mode)) 11530 { 11531 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0))); 11532 size = GET_MODE_PRECISION (inner_mode); 11533 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0))); 11534 } 11535 else 11536 continue; 11537 11538 if (!(GET_CODE (x) == LSHIFTRT 11539 && CONST_INT_P (XEXP (x, 1)) 11540 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode))) 11541 { 11542 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode)); 11543 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode)); 11544 } 11545 11546 SUBST (**iter, x); 11547 changed = true; 11548 } 11549 11550 if (changed) 11551 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST) 11552 maybe_swap_commutative_operands (**iter); 11553 11554 rtx *dst = &SET_DEST (pat); 11555 scalar_int_mode mode; 11556 if (GET_CODE (*dst) == ZERO_EXTRACT 11557 && REG_P (XEXP (*dst, 0)) 11558 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode) 11559 && CONST_INT_P (XEXP (*dst, 1)) 11560 && CONST_INT_P (XEXP (*dst, 2))) 11561 { 11562 rtx reg = XEXP (*dst, 0); 11563 int width = INTVAL (XEXP (*dst, 1)); 11564 int offset = INTVAL (XEXP (*dst, 2)); 11565 int reg_width = GET_MODE_PRECISION (mode); 11566 if (BITS_BIG_ENDIAN) 11567 offset = reg_width - width - offset; 11568 11569 rtx x, y, z, w; 11570 wide_int mask = wi::shifted_mask (offset, width, true, reg_width); 11571 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width); 11572 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode)); 11573 if (offset) 11574 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset)); 11575 else 11576 y = 
SET_SRC (pat); 11577 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode)); 11578 w = gen_rtx_IOR (mode, x, z); 11579 SUBST (SET_DEST (pat), reg); 11580 SUBST (SET_SRC (pat), w); 11581 11582 changed = true; 11583 } 11584 11585 return changed; 11586 } 11587 11588 /* Like recog, but we receive the address of a pointer to a new pattern. 11589 We try to match the rtx that the pointer points to. 11590 If that fails, we may try to modify or replace the pattern, 11591 storing the replacement into the same pointer object. 11592 11593 Modifications include deletion or addition of CLOBBERs. If the 11594 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT 11595 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that 11596 (and undo if that fails). 11597 11598 PNOTES is a pointer to a location where any REG_UNUSED notes added for 11599 the CLOBBERs are placed. 11600 11601 The value is the final insn code from the pattern ultimately matched, 11602 or -1. */ 11603 11604 static int 11605 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes) 11606 { 11607 rtx pat = *pnewpat; 11608 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes); 11609 if (insn_code_number >= 0 || check_asm_operands (pat)) 11610 return insn_code_number; 11611 11612 void *marker = get_undo_marker (); 11613 bool changed = false; 11614 11615 if (GET_CODE (pat) == SET) 11616 changed = change_zero_ext (pat); 11617 else if (GET_CODE (pat) == PARALLEL) 11618 { 11619 int i; 11620 for (i = 0; i < XVECLEN (pat, 0); i++) 11621 { 11622 rtx set = XVECEXP (pat, 0, i); 11623 if (GET_CODE (set) == SET) 11624 changed |= change_zero_ext (set); 11625 } 11626 } 11627 11628 if (changed) 11629 { 11630 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes); 11631 11632 if (insn_code_number < 0) 11633 undo_to_marker (marker); 11634 } 11635 11636 return insn_code_number; 11637 } 11638 11639 /* Like gen_lowpart_general but for use by combine. In combine it 11640 is not possible to create any new pseudoregs. However, it is 11641 safe to create invalid memory addresses, because combine will 11642 try to recognize them and all they will do is make the combine 11643 attempt fail. 11644 11645 If for some reason this cannot do its job, an rtx 11646 (clobber (const_int 0)) is returned. 11647 An insn containing that will not be recognized. */ 11648 11649 static rtx 11650 gen_lowpart_for_combine (machine_mode omode, rtx x) 11651 { 11652 machine_mode imode = GET_MODE (x); 11653 rtx result; 11654 11655 if (omode == imode) 11656 return x; 11657 11658 /* We can only support MODE being wider than a word if X is a 11659 constant integer or has a mode the same size. */ 11660 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD) 11661 && ! (CONST_SCALAR_INT_P (x) 11662 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode)))) 11663 goto fail; 11664 11665 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart 11666 won't know what to do. So we will strip off the SUBREG here and 11667 process normally. */ 11668 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x))) 11669 { 11670 x = SUBREG_REG (x); 11671 11672 /* For use in case we fall down into the address adjustments 11673 further below, we need to adjust the known mode and size of 11674 x; imode and isize, since we just adjusted x. 
*/ 11675 imode = GET_MODE (x); 11676 11677 if (imode == omode) 11678 return x; 11679 } 11680 11681 result = gen_lowpart_common (omode, x); 11682 11683 if (result) 11684 return result; 11685 11686 if (MEM_P (x)) 11687 { 11688 /* Refuse to work on a volatile memory ref or one with a mode-dependent 11689 address. */ 11690 if (MEM_VOLATILE_P (x) 11691 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x))) 11692 goto fail; 11693 11694 /* If we want to refer to something bigger than the original memref, 11695 generate a paradoxical subreg instead. That will force a reload 11696 of the original memref X. */ 11697 if (paradoxical_subreg_p (omode, imode)) 11698 return gen_rtx_SUBREG (omode, x, 0); 11699 11700 poly_int64 offset = byte_lowpart_offset (omode, imode); 11701 return adjust_address_nv (x, omode, offset); 11702 } 11703 11704 /* If X is a comparison operator, rewrite it in a new mode. This 11705 probably won't match, but may allow further simplifications. */ 11706 else if (COMPARISON_P (x)) 11707 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1)); 11708 11709 /* If we couldn't simplify X any other way, just enclose it in a 11710 SUBREG. Normally, this SUBREG won't match, but some patterns may 11711 include an explicit SUBREG or we may simplify it further in combine. */ 11712 else 11713 { 11714 rtx res; 11715 11716 if (imode == VOIDmode) 11717 { 11718 imode = int_mode_for_mode (omode).require (); 11719 x = gen_lowpart_common (imode, x); 11720 if (x == NULL) 11721 goto fail; 11722 } 11723 res = lowpart_subreg (omode, x, imode); 11724 if (res) 11725 return res; 11726 } 11727 11728 fail: 11729 return gen_rtx_CLOBBER (omode, const0_rtx); 11730 } 11731 11732 /* Try to simplify a comparison between OP0 and a constant OP1, 11733 where CODE is the comparison code that will be tested, into a 11734 (CODE OP0 const0_rtx) form. 11735 11736 The result is a possibly different comparison code to use. 11737 *POP1 may be updated. */ 11738 11739 static enum rtx_code 11740 simplify_compare_const (enum rtx_code code, machine_mode mode, 11741 rtx op0, rtx *pop1) 11742 { 11743 scalar_int_mode int_mode; 11744 HOST_WIDE_INT const_op = INTVAL (*pop1); 11745 11746 /* Get the constant we are comparing against and turn off all bits 11747 not on in our mode. */ 11748 if (mode != VOIDmode) 11749 const_op = trunc_int_for_mode (const_op, mode); 11750 11751 /* If we are comparing against a constant power of two and the value 11752 being compared can only have that single bit nonzero (e.g., it was 11753 `and'ed with that bit), we can replace this with a comparison 11754 with zero. */ 11755 if (const_op 11756 && (code == EQ || code == NE || code == GE || code == GEU 11757 || code == LT || code == LTU) 11758 && is_a <scalar_int_mode> (mode, &int_mode) 11759 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11760 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode)) 11761 && (nonzero_bits (op0, int_mode) 11762 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode)))) 11763 { 11764 code = (code == EQ || code == GE || code == GEU ? NE : EQ); 11765 const_op = 0; 11766 } 11767 11768 /* Similarly, if we are comparing a value known to be either -1 or 11769 0 with -1, change it to the opposite comparison against zero. 
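For example, (eq Y (const_int -1)) with Y known to be 0 or -1 becomes (ne Y (const_int 0)).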
*/ 11770 if (const_op == -1 11771 && (code == EQ || code == NE || code == GT || code == LE 11772 || code == GEU || code == LTU) 11773 && is_a <scalar_int_mode> (mode, &int_mode) 11774 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode)) 11775 { 11776 code = (code == EQ || code == LE || code == GEU ? NE : EQ); 11777 const_op = 0; 11778 } 11779 11780 /* Do some canonicalizations based on the comparison code. We prefer 11781 comparisons against zero and then prefer equality comparisons. 11782 If we can reduce the size of a constant, we will do that too. */ 11783 switch (code) 11784 { 11785 case LT: 11786 /* < C is equivalent to <= (C - 1) */ 11787 if (const_op > 0) 11788 { 11789 const_op -= 1; 11790 code = LE; 11791 /* ... fall through to LE case below. */ 11792 gcc_fallthrough (); 11793 } 11794 else 11795 break; 11796 11797 case LE: 11798 /* <= C is equivalent to < (C + 1); we do this for C < 0 */ 11799 if (const_op < 0) 11800 { 11801 const_op += 1; 11802 code = LT; 11803 } 11804 11805 /* If we are doing a <= 0 comparison on a value known to have 11806 a zero sign bit, we can replace this with == 0. */ 11807 else if (const_op == 0 11808 && is_a <scalar_int_mode> (mode, &int_mode) 11809 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11810 && (nonzero_bits (op0, int_mode) 11811 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))) 11812 == 0) 11813 code = EQ; 11814 break; 11815 11816 case GE: 11817 /* >= C is equivalent to > (C - 1). */ 11818 if (const_op > 0) 11819 { 11820 const_op -= 1; 11821 code = GT; 11822 /* ... fall through to GT below. */ 11823 gcc_fallthrough (); 11824 } 11825 else 11826 break; 11827 11828 case GT: 11829 /* > C is equivalent to >= (C + 1); we do this for C < 0. */ 11830 if (const_op < 0) 11831 { 11832 const_op += 1; 11833 code = GE; 11834 } 11835 11836 /* If we are doing a > 0 comparison on a value known to have 11837 a zero sign bit, we can replace this with != 0. */ 11838 else if (const_op == 0 11839 && is_a <scalar_int_mode> (mode, &int_mode) 11840 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11841 && (nonzero_bits (op0, int_mode) 11842 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))) 11843 == 0) 11844 code = NE; 11845 break; 11846 11847 case LTU: 11848 /* < C is equivalent to <= (C - 1). */ 11849 if (const_op > 0) 11850 { 11851 const_op -= 1; 11852 code = LEU; 11853 /* ... fall through ... */ 11854 gcc_fallthrough (); 11855 } 11856 /* (unsigned) < 0x80000000 is equivalent to >= 0. */ 11857 else if (is_a <scalar_int_mode> (mode, &int_mode) 11858 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11859 && ((unsigned HOST_WIDE_INT) const_op 11860 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))) 11861 { 11862 const_op = 0; 11863 code = GE; 11864 break; 11865 } 11866 else 11867 break; 11868 11869 case LEU: 11870 /* unsigned <= 0 is equivalent to == 0 */ 11871 if (const_op == 0) 11872 code = EQ; 11873 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */ 11874 else if (is_a <scalar_int_mode> (mode, &int_mode) 11875 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11876 && ((unsigned HOST_WIDE_INT) const_op 11877 == ((HOST_WIDE_INT_1U 11878 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))) 11879 { 11880 const_op = 0; 11881 code = GE; 11882 } 11883 break; 11884 11885 case GEU: 11886 /* >= C is equivalent to > (C - 1). */ 11887 if (const_op > 1) 11888 { 11889 const_op -= 1; 11890 code = GTU; 11891 /* ... fall through ... 
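	 to the GTU case below; e.g. (geu X (const_int 4)) has just
	 become (gtu X (const_int 3)), reducing the size of the
	 constant.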
*/ 11892 gcc_fallthrough (); 11893 } 11894 11895 /* (unsigned) >= 0x80000000 is equivalent to < 0. */ 11896 else if (is_a <scalar_int_mode> (mode, &int_mode) 11897 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11898 && ((unsigned HOST_WIDE_INT) const_op 11899 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))) 11900 { 11901 const_op = 0; 11902 code = LT; 11903 break; 11904 } 11905 else 11906 break; 11907 11908 case GTU: 11909 /* unsigned > 0 is equivalent to != 0 */ 11910 if (const_op == 0) 11911 code = NE; 11912 /* (unsigned) > 0x7fffffff is equivalent to < 0. */ 11913 else if (is_a <scalar_int_mode> (mode, &int_mode) 11914 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT 11915 && ((unsigned HOST_WIDE_INT) const_op 11916 == (HOST_WIDE_INT_1U 11917 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)) 11918 { 11919 const_op = 0; 11920 code = LT; 11921 } 11922 break; 11923 11924 default: 11925 break; 11926 } 11927 11928 *pop1 = GEN_INT (const_op); 11929 return code; 11930 } 11931 11932 /* Simplify a comparison between *POP0 and *POP1 where CODE is the 11933 comparison code that will be tested. 11934 11935 The result is a possibly different comparison code to use. *POP0 and 11936 *POP1 may be updated. 11937 11938 It is possible that we might detect that a comparison is either always 11939 true or always false. However, we do not perform general constant 11940 folding in combine, so this knowledge isn't useful. Such tautologies 11941 should have been detected earlier. Hence we ignore all such cases. */ 11942 11943 static enum rtx_code 11944 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) 11945 { 11946 rtx op0 = *pop0; 11947 rtx op1 = *pop1; 11948 rtx tem, tem1; 11949 int i; 11950 scalar_int_mode mode, inner_mode, tmode; 11951 opt_scalar_int_mode tmode_iter; 11952 11953 /* Try a few ways of applying the same transformation to both operands. */ 11954 while (1) 11955 { 11956 /* The test below this one won't handle SIGN_EXTENDs on these machines, 11957 so check specially. */ 11958 if (!WORD_REGISTER_OPERATIONS 11959 && code != GTU && code != GEU && code != LTU && code != LEU 11960 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT 11961 && GET_CODE (XEXP (op0, 0)) == ASHIFT 11962 && GET_CODE (XEXP (op1, 0)) == ASHIFT 11963 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG 11964 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG 11965 && is_a <scalar_int_mode> (GET_MODE (op0), &mode) 11966 && (is_a <scalar_int_mode> 11967 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode)) 11968 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))) 11969 && CONST_INT_P (XEXP (op0, 1)) 11970 && XEXP (op0, 1) == XEXP (op1, 1) 11971 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) 11972 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1) 11973 && (INTVAL (XEXP (op0, 1)) 11974 == (GET_MODE_PRECISION (mode) 11975 - GET_MODE_PRECISION (inner_mode)))) 11976 { 11977 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0)); 11978 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0)); 11979 } 11980 11981 /* If both operands are the same constant shift, see if we can ignore the 11982 shift. We can if the shift is a rotate or if the bits shifted out of 11983 this shift are known to be zero for both inputs and if the type of 11984 comparison is compatible with the shift. 
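	 For example, (eq (lshiftrt X (const_int 2))
	 (lshiftrt Y (const_int 2))) can be treated as (eq X Y) when
	 the low two bits of both X and Y are known to be zero, since
	 the shift then discards no information from either input.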
*/ 11985 if (GET_CODE (op0) == GET_CODE (op1) 11986 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0)) 11987 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ)) 11988 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT) 11989 && (code != GT && code != LT && code != GE && code != LE)) 11990 || (GET_CODE (op0) == ASHIFTRT 11991 && (code != GTU && code != LTU 11992 && code != GEU && code != LEU))) 11993 && CONST_INT_P (XEXP (op0, 1)) 11994 && INTVAL (XEXP (op0, 1)) >= 0 11995 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT 11996 && XEXP (op0, 1) == XEXP (op1, 1)) 11997 { 11998 machine_mode mode = GET_MODE (op0); 11999 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); 12000 int shift_count = INTVAL (XEXP (op0, 1)); 12001 12002 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT) 12003 mask &= (mask >> shift_count) << shift_count; 12004 else if (GET_CODE (op0) == ASHIFT) 12005 mask = (mask & (mask << shift_count)) >> shift_count; 12006 12007 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0 12008 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0) 12009 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0); 12010 else 12011 break; 12012 } 12013 12014 /* If both operands are AND's of a paradoxical SUBREG by constant, the 12015 SUBREGs are of the same mode, and, in both cases, the AND would 12016 be redundant if the comparison was done in the narrower mode, 12017 do the comparison in the narrower mode (e.g., we are AND'ing with 1 12018 and the operand's possibly nonzero bits are 0xffffff01; in that case 12019 if we only care about QImode, we don't need the AND). This case 12020 occurs if the output mode of an scc insn is not SImode and 12021 STORE_FLAG_VALUE == 1 (e.g., the 386). 12022 12023 Similarly, check for a case where the AND's are ZERO_EXTEND 12024 operations from some narrower mode even though a SUBREG is not 12025 present. */ 12026 12027 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND 12028 && CONST_INT_P (XEXP (op0, 1)) 12029 && CONST_INT_P (XEXP (op1, 1))) 12030 { 12031 rtx inner_op0 = XEXP (op0, 0); 12032 rtx inner_op1 = XEXP (op1, 0); 12033 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1)); 12034 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1)); 12035 int changed = 0; 12036 12037 if (paradoxical_subreg_p (inner_op0) 12038 && GET_CODE (inner_op1) == SUBREG 12039 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0))) 12040 && (GET_MODE (SUBREG_REG (inner_op0)) 12041 == GET_MODE (SUBREG_REG (inner_op1))) 12042 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0), 12043 GET_MODE (SUBREG_REG (inner_op0)))) == 0 12044 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1), 12045 GET_MODE (SUBREG_REG (inner_op1)))) == 0) 12046 { 12047 op0 = SUBREG_REG (inner_op0); 12048 op1 = SUBREG_REG (inner_op1); 12049 12050 /* The resulting comparison is always unsigned since we masked 12051 off the original sign bit. */ 12052 code = unsigned_condition (code); 12053 12054 changed = 1; 12055 } 12056 12057 else if (c0 == c1) 12058 FOR_EACH_MODE_UNTIL (tmode, 12059 as_a <scalar_int_mode> (GET_MODE (op0))) 12060 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode)) 12061 { 12062 op0 = gen_lowpart_or_truncate (tmode, inner_op0); 12063 op1 = gen_lowpart_or_truncate (tmode, inner_op1); 12064 code = unsigned_condition (code); 12065 changed = 1; 12066 break; 12067 } 12068 12069 if (! 
changed) 12070 break; 12071 } 12072 12073 /* If both operands are NOT, we can strip off the outer operation 12074 and adjust the comparison code for swapped operands; similarly for 12075 NEG, except that this must be an equality comparison. */ 12076 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT) 12077 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG 12078 && (code == EQ || code == NE))) 12079 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code); 12080 12081 else 12082 break; 12083 } 12084 12085 /* If the first operand is a constant, swap the operands and adjust the 12086 comparison code appropriately, but don't do this if the second operand 12087 is already a constant integer. */ 12088 if (swap_commutative_operands_p (op0, op1)) 12089 { 12090 std::swap (op0, op1); 12091 code = swap_condition (code); 12092 } 12093 12094 /* We now enter a loop during which we will try to simplify the comparison. 12095 For the most part, we only are concerned with comparisons with zero, 12096 but some things may really be comparisons with zero but not start 12097 out looking that way. */ 12098 12099 while (CONST_INT_P (op1)) 12100 { 12101 machine_mode raw_mode = GET_MODE (op0); 12102 scalar_int_mode int_mode; 12103 int equality_comparison_p; 12104 int sign_bit_comparison_p; 12105 int unsigned_comparison_p; 12106 HOST_WIDE_INT const_op; 12107 12108 /* We only want to handle integral modes. This catches VOIDmode, 12109 CCmode, and the floating-point modes. An exception is that we 12110 can handle VOIDmode if OP0 is a COMPARE or a comparison 12111 operation. */ 12112 12113 if (GET_MODE_CLASS (raw_mode) != MODE_INT 12114 && ! (raw_mode == VOIDmode 12115 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0)))) 12116 break; 12117 12118 /* Try to simplify the compare to constant, possibly changing the 12119 comparison op, and/or changing op1 to zero. */ 12120 code = simplify_compare_const (code, raw_mode, op0, &op1); 12121 const_op = INTVAL (op1); 12122 12123 /* Compute some predicates to simplify code below. */ 12124 12125 equality_comparison_p = (code == EQ || code == NE); 12126 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0); 12127 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU 12128 || code == GEU); 12129 12130 /* If this is a sign bit comparison and we can do arithmetic in 12131 MODE, say that we will only be needing the sign bit of OP0. */ 12132 if (sign_bit_comparison_p 12133 && is_a <scalar_int_mode> (raw_mode, &int_mode) 12134 && HWI_COMPUTABLE_MODE_P (int_mode)) 12135 op0 = force_to_mode (op0, int_mode, 12136 HOST_WIDE_INT_1U 12137 << (GET_MODE_PRECISION (int_mode) - 1), 12138 0); 12139 12140 if (COMPARISON_P (op0)) 12141 { 12142 /* We can't do anything if OP0 is a condition code value, rather 12143 than an actual data value. */ 12144 if (const_op != 0 12145 || CC0_P (XEXP (op0, 0)) 12146 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC) 12147 break; 12148 12149 /* Get the two operands being compared. */ 12150 if (GET_CODE (XEXP (op0, 0)) == COMPARE) 12151 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1); 12152 else 12153 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1); 12154 12155 /* Check for the cases where we simply want the result of the 12156 earlier test or the opposite of that result. 
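	     For example, (ne (eq A B) (const_int 0)) is just (eq A B)
	     itself, and (eq (eq A B) (const_int 0)) is the reversed
	     test (ne A B), provided the reversed code is representable.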
*/ 12157 if (code == NE || code == EQ 12158 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE) 12159 && (code == LT || code == GE))) 12160 { 12161 enum rtx_code new_code; 12162 if (code == LT || code == NE) 12163 new_code = GET_CODE (op0); 12164 else 12165 new_code = reversed_comparison_code (op0, NULL); 12166 12167 if (new_code != UNKNOWN) 12168 { 12169 code = new_code; 12170 op0 = tem; 12171 op1 = tem1; 12172 continue; 12173 } 12174 } 12175 break; 12176 } 12177 12178 if (raw_mode == VOIDmode) 12179 break; 12180 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode); 12181 12182 /* Now try cases based on the opcode of OP0. If none of the cases 12183 does a "continue", we exit this loop immediately after the 12184 switch. */ 12185 12186 unsigned int mode_width = GET_MODE_PRECISION (mode); 12187 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); 12188 switch (GET_CODE (op0)) 12189 { 12190 case ZERO_EXTRACT: 12191 /* If we are extracting a single bit from a variable position in 12192 a constant that has only a single bit set and are comparing it 12193 with zero, we can convert this into an equality comparison 12194 between the position and the location of the single bit. */ 12195 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might 12196 have already reduced the shift count modulo the word size. */ 12197 if (!SHIFT_COUNT_TRUNCATED 12198 && CONST_INT_P (XEXP (op0, 0)) 12199 && XEXP (op0, 1) == const1_rtx 12200 && equality_comparison_p && const_op == 0 12201 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0) 12202 { 12203 if (BITS_BIG_ENDIAN) 12204 i = BITS_PER_WORD - 1 - i; 12205 12206 op0 = XEXP (op0, 2); 12207 op1 = GEN_INT (i); 12208 const_op = i; 12209 12210 /* Result is nonzero iff shift count is equal to I. */ 12211 code = reverse_condition (code); 12212 continue; 12213 } 12214 12215 /* fall through */ 12216 12217 case SIGN_EXTRACT: 12218 tem = expand_compound_operation (op0); 12219 if (tem != op0) 12220 { 12221 op0 = tem; 12222 continue; 12223 } 12224 break; 12225 12226 case NOT: 12227 /* If testing for equality, we can take the NOT of the constant. */ 12228 if (equality_comparison_p 12229 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0) 12230 { 12231 op0 = XEXP (op0, 0); 12232 op1 = tem; 12233 continue; 12234 } 12235 12236 /* If just looking at the sign bit, reverse the sense of the 12237 comparison. */ 12238 if (sign_bit_comparison_p) 12239 { 12240 op0 = XEXP (op0, 0); 12241 code = (code == GE ? LT : GE); 12242 continue; 12243 } 12244 break; 12245 12246 case NEG: 12247 /* If testing for equality, we can take the NEG of the constant. */ 12248 if (equality_comparison_p 12249 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0) 12250 { 12251 op0 = XEXP (op0, 0); 12252 op1 = tem; 12253 continue; 12254 } 12255 12256 /* The remaining cases only apply to comparisons with zero. */ 12257 if (const_op != 0) 12258 break; 12259 12260 /* When X is ABS or is known positive, 12261 (neg X) is < 0 if and only if X != 0. */ 12262 12263 if (sign_bit_comparison_p 12264 && (GET_CODE (XEXP (op0, 0)) == ABS 12265 || (mode_width <= HOST_BITS_PER_WIDE_INT 12266 && (nonzero_bits (XEXP (op0, 0), mode) 12267 & (HOST_WIDE_INT_1U << (mode_width - 1))) 12268 == 0))) 12269 { 12270 op0 = XEXP (op0, 0); 12271 code = (code == LT ? NE : EQ); 12272 continue; 12273 } 12274 12275 /* If we have NEG of something whose two high-order bits are the 12276 same, we know that "(-a) < 0" is equivalent to "a > 0". 
*/
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.
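	     For instance, if A is known to be zero-extended from QImode
	     into SImode, then (subreg:QI (plus:SI A (const_int -1)))
	     compared unsigned against (const_int 5) satisfies condition 1
	     (here C1 is 1 and C2 is 5), so the comparison can be performed
	     directly on (plus:SI A (const_int -1)) in SImode.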
*/ 12363 12364 if (mode_width <= HOST_BITS_PER_WIDE_INT 12365 && subreg_lowpart_p (op0) 12366 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)), 12367 &inner_mode) 12368 && GET_MODE_PRECISION (inner_mode) > mode_width 12369 && GET_CODE (SUBREG_REG (op0)) == PLUS 12370 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))) 12371 { 12372 rtx a = XEXP (SUBREG_REG (op0), 0); 12373 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1)); 12374 12375 if ((c1 > 0 12376 && (unsigned HOST_WIDE_INT) c1 12377 < HOST_WIDE_INT_1U << (mode_width - 1) 12378 && (equality_comparison_p || unsigned_comparison_p) 12379 /* (A - C1) zero-extends if it is positive and sign-extends 12380 if it is negative, C2 both zero- and sign-extends. */ 12381 && (((nonzero_bits (a, inner_mode) 12382 & ~GET_MODE_MASK (mode)) == 0 12383 && const_op >= 0) 12384 /* (A - C1) sign-extends if it is positive and 1-extends 12385 if it is negative, C2 both sign- and 1-extends. */ 12386 || (num_sign_bit_copies (a, inner_mode) 12387 > (unsigned int) (GET_MODE_PRECISION (inner_mode) 12388 - mode_width) 12389 && const_op < 0))) 12390 || ((unsigned HOST_WIDE_INT) c1 12391 < HOST_WIDE_INT_1U << (mode_width - 2) 12392 /* (A - C1) always sign-extends, like C2. */ 12393 && num_sign_bit_copies (a, inner_mode) 12394 > (unsigned int) (GET_MODE_PRECISION (inner_mode) 12395 - (mode_width - 1)))) 12396 { 12397 op0 = SUBREG_REG (op0); 12398 continue; 12399 } 12400 } 12401 12402 /* If the inner mode is narrower and we are extracting the low part, 12403 we can treat the SUBREG as if it were a ZERO_EXTEND. */ 12404 if (paradoxical_subreg_p (op0)) 12405 ; 12406 else if (subreg_lowpart_p (op0) 12407 && GET_MODE_CLASS (mode) == MODE_INT 12408 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode) 12409 && (code == NE || code == EQ) 12410 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT 12411 && !paradoxical_subreg_p (op0) 12412 && (nonzero_bits (SUBREG_REG (op0), inner_mode) 12413 & ~GET_MODE_MASK (mode)) == 0) 12414 { 12415 /* Remove outer subregs that don't do anything. */ 12416 tem = gen_lowpart (inner_mode, op1); 12417 12418 if ((nonzero_bits (tem, inner_mode) 12419 & ~GET_MODE_MASK (mode)) == 0) 12420 { 12421 op0 = SUBREG_REG (op0); 12422 op1 = tem; 12423 continue; 12424 } 12425 break; 12426 } 12427 else 12428 break; 12429 12430 /* FALLTHROUGH */ 12431 12432 case ZERO_EXTEND: 12433 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode) 12434 && (unsigned_comparison_p || equality_comparison_p) 12435 && HWI_COMPUTABLE_MODE_P (mode) 12436 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode) 12437 && const_op >= 0 12438 && have_insn_for (COMPARE, mode)) 12439 { 12440 op0 = XEXP (op0, 0); 12441 continue; 12442 } 12443 break; 12444 12445 case PLUS: 12446 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do 12447 this for equality comparisons due to pathological cases involving 12448 overflows. */ 12449 if (equality_comparison_p 12450 && (tem = simplify_binary_operation (MINUS, mode, 12451 op1, XEXP (op0, 1))) != 0) 12452 { 12453 op0 = XEXP (op0, 0); 12454 op1 = tem; 12455 continue; 12456 } 12457 12458 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */ 12459 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx 12460 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p) 12461 { 12462 op0 = XEXP (XEXP (op0, 0), 0); 12463 code = (code == LT ? EQ : NE); 12464 continue; 12465 } 12466 break; 12467 12468 case MINUS: 12469 /* We used to optimize signed comparisons against zero, but that 12470 was incorrect. 
Unsigned comparisons against zero (GTU, LEU) 12471 arrive here as equality comparisons, or (GEU, LTU) are 12472 optimized away. No need to special-case them. */ 12473 12474 /* (eq (minus A B) C) -> (eq A (plus B C)) or 12475 (eq B (minus A C)), whichever simplifies. We can only do 12476 this for equality comparisons due to pathological cases involving 12477 overflows. */ 12478 if (equality_comparison_p 12479 && (tem = simplify_binary_operation (PLUS, mode, 12480 XEXP (op0, 1), op1)) != 0) 12481 { 12482 op0 = XEXP (op0, 0); 12483 op1 = tem; 12484 continue; 12485 } 12486 12487 if (equality_comparison_p 12488 && (tem = simplify_binary_operation (MINUS, mode, 12489 XEXP (op0, 0), op1)) != 0) 12490 { 12491 op0 = XEXP (op0, 1); 12492 op1 = tem; 12493 continue; 12494 } 12495 12496 /* The sign bit of (minus (ashiftrt X C) X), where C is the number 12497 of bits in X minus 1, is one iff X > 0. */ 12498 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT 12499 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) 12500 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1 12501 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) 12502 { 12503 op0 = XEXP (op0, 1); 12504 code = (code == GE ? LE : GT); 12505 continue; 12506 } 12507 break; 12508 12509 case XOR: 12510 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification 12511 if C is zero or B is a constant. */ 12512 if (equality_comparison_p 12513 && (tem = simplify_binary_operation (XOR, mode, 12514 XEXP (op0, 1), op1)) != 0) 12515 { 12516 op0 = XEXP (op0, 0); 12517 op1 = tem; 12518 continue; 12519 } 12520 break; 12521 12522 12523 case IOR: 12524 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero 12525 iff X <= 0. */ 12526 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS 12527 && XEXP (XEXP (op0, 0), 1) == constm1_rtx 12528 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) 12529 { 12530 op0 = XEXP (op0, 1); 12531 code = (code == GE ? GT : LE); 12532 continue; 12533 } 12534 break; 12535 12536 case AND: 12537 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This 12538 will be converted to a ZERO_EXTRACT later. */ 12539 if (const_op == 0 && equality_comparison_p 12540 && GET_CODE (XEXP (op0, 0)) == ASHIFT 12541 && XEXP (XEXP (op0, 0), 0) == const1_rtx) 12542 { 12543 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1), 12544 XEXP (XEXP (op0, 0), 1)); 12545 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1); 12546 continue; 12547 } 12548 12549 /* If we are comparing (and (lshiftrt X C1) C2) for equality with 12550 zero and X is a comparison and C1 and C2 describe only bits set 12551 in STORE_FLAG_VALUE, we can compare with X. */ 12552 if (const_op == 0 && equality_comparison_p 12553 && mode_width <= HOST_BITS_PER_WIDE_INT 12554 && CONST_INT_P (XEXP (op0, 1)) 12555 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT 12556 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) 12557 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0 12558 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT) 12559 { 12560 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) 12561 << INTVAL (XEXP (XEXP (op0, 0), 1))); 12562 if ((~STORE_FLAG_VALUE & mask) == 0 12563 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0)) 12564 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0 12565 && COMPARISON_P (tem)))) 12566 { 12567 op0 = XEXP (XEXP (op0, 0), 0); 12568 continue; 12569 } 12570 } 12571 12572 /* If we are doing an equality comparison of an AND of a bit equal 12573 to the sign bit, replace this with a LT or GE comparison of 12574 the underlying value. 
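	     For example, in SImode, (eq (and X (const_int 0x80000000))
	     (const_int 0)) becomes (ge X (const_int 0)), and the NE form
	     becomes (lt X (const_int 0)).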
*/ 12575 if (equality_comparison_p 12576 && const_op == 0 12577 && CONST_INT_P (XEXP (op0, 1)) 12578 && mode_width <= HOST_BITS_PER_WIDE_INT 12579 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) 12580 == HOST_WIDE_INT_1U << (mode_width - 1))) 12581 { 12582 op0 = XEXP (op0, 0); 12583 code = (code == EQ ? GE : LT); 12584 continue; 12585 } 12586 12587 /* If this AND operation is really a ZERO_EXTEND from a narrower 12588 mode, the constant fits within that mode, and this is either an 12589 equality or unsigned comparison, try to do this comparison in 12590 the narrower mode. 12591 12592 Note that in: 12593 12594 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0)) 12595 -> (ne:DI (reg:SI 4) (const_int 0)) 12596 12597 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is 12598 known to hold a value of the required mode the 12599 transformation is invalid. */ 12600 if ((equality_comparison_p || unsigned_comparison_p) 12601 && CONST_INT_P (XEXP (op0, 1)) 12602 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1)) 12603 & GET_MODE_MASK (mode)) 12604 + 1)) >= 0 12605 && const_op >> i == 0 12606 && int_mode_for_size (i, 1).exists (&tmode)) 12607 { 12608 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0)); 12609 continue; 12610 } 12611 12612 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1 12613 fits in both M1 and M2 and the SUBREG is either paradoxical 12614 or represents the low part, permute the SUBREG and the AND 12615 and try again. */ 12616 if (GET_CODE (XEXP (op0, 0)) == SUBREG 12617 && CONST_INT_P (XEXP (op0, 1))) 12618 { 12619 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1)); 12620 /* Require an integral mode, to avoid creating something like 12621 (AND:SF ...). */ 12622 if ((is_a <scalar_int_mode> 12623 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode)) 12624 /* It is unsafe to commute the AND into the SUBREG if the 12625 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is 12626 not defined. As originally written the upper bits 12627 have a defined value due to the AND operation. 12628 However, if we commute the AND inside the SUBREG then 12629 they no longer have defined values and the meaning of 12630 the code has been changed. 12631 Also C1 should not change value in the smaller mode, 12632 see PR67028 (a positive C1 can become negative in the 12633 smaller mode, so that the AND does no longer mask the 12634 upper bits). */ 12635 && ((WORD_REGISTER_OPERATIONS 12636 && mode_width > GET_MODE_PRECISION (tmode) 12637 && mode_width <= BITS_PER_WORD 12638 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1) 12639 || (mode_width <= GET_MODE_PRECISION (tmode) 12640 && subreg_lowpart_p (XEXP (op0, 0)))) 12641 && mode_width <= HOST_BITS_PER_WIDE_INT 12642 && HWI_COMPUTABLE_MODE_P (tmode) 12643 && (c1 & ~mask) == 0 12644 && (c1 & ~GET_MODE_MASK (tmode)) == 0 12645 && c1 != mask 12646 && c1 != GET_MODE_MASK (tmode)) 12647 { 12648 op0 = simplify_gen_binary (AND, tmode, 12649 SUBREG_REG (XEXP (op0, 0)), 12650 gen_int_mode (c1, tmode)); 12651 op0 = gen_lowpart (mode, op0); 12652 continue; 12653 } 12654 } 12655 12656 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */ 12657 if (const_op == 0 && equality_comparison_p 12658 && XEXP (op0, 1) == const1_rtx 12659 && GET_CODE (XEXP (op0, 0)) == NOT) 12660 { 12661 op0 = simplify_and_const_int (NULL_RTX, mode, 12662 XEXP (XEXP (op0, 0), 0), 1); 12663 code = (code == NE ? EQ : NE); 12664 continue; 12665 } 12666 12667 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to 12668 (eq (and (lshiftrt X) 1) 0). 
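	     (Shifting NOT X right brings the complement of the tested bit
	     into the low position, so testing that bit for 1 is the same
	     as testing the original bit of X for 0.)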
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;

	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (!
unsigned_comparison_p 12763 && CONST_INT_P (XEXP (op0, 1)) 12764 && GET_CODE (XEXP (op0, 0)) == ASHIFT 12765 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) 12766 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1) 12767 .exists (&tmode)) 12768 && (((unsigned HOST_WIDE_INT) const_op 12769 + (GET_MODE_MASK (tmode) >> 1) + 1) 12770 <= GET_MODE_MASK (tmode))) 12771 { 12772 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0)); 12773 continue; 12774 } 12775 12776 /* Likewise if OP0 is a PLUS of a sign extension with a 12777 constant, which is usually represented with the PLUS 12778 between the shifts. */ 12779 if (! unsigned_comparison_p 12780 && CONST_INT_P (XEXP (op0, 1)) 12781 && GET_CODE (XEXP (op0, 0)) == PLUS 12782 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) 12783 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT 12784 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1) 12785 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1) 12786 .exists (&tmode)) 12787 && (((unsigned HOST_WIDE_INT) const_op 12788 + (GET_MODE_MASK (tmode) >> 1) + 1) 12789 <= GET_MODE_MASK (tmode))) 12790 { 12791 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0); 12792 rtx add_const = XEXP (XEXP (op0, 0), 1); 12793 rtx new_const = simplify_gen_binary (ASHIFTRT, mode, 12794 add_const, XEXP (op0, 1)); 12795 12796 op0 = simplify_gen_binary (PLUS, tmode, 12797 gen_lowpart (tmode, inner), 12798 new_const); 12799 continue; 12800 } 12801 12802 /* FALLTHROUGH */ 12803 case LSHIFTRT: 12804 /* If we have (compare (xshiftrt FOO N) (const_int C)) and 12805 the low order N bits of FOO are known to be zero, we can do this 12806 by comparing FOO with C shifted left N bits so long as no 12807 overflow occurs. Even if the low order N bits of FOO aren't known 12808 to be zero, if the comparison is >= or < we can use the same 12809 optimization and for > or <= by setting all the low 12810 order N bits in the comparison constant. */ 12811 if (CONST_INT_P (XEXP (op0, 1)) 12812 && INTVAL (XEXP (op0, 1)) > 0 12813 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT 12814 && mode_width <= HOST_BITS_PER_WIDE_INT 12815 && (((unsigned HOST_WIDE_INT) const_op 12816 + (GET_CODE (op0) != LSHIFTRT 12817 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1) 12818 + 1) 12819 : 0)) 12820 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)))) 12821 { 12822 unsigned HOST_WIDE_INT low_bits 12823 = (nonzero_bits (XEXP (op0, 0), mode) 12824 & ((HOST_WIDE_INT_1U 12825 << INTVAL (XEXP (op0, 1))) - 1)); 12826 if (low_bits == 0 || !equality_comparison_p) 12827 { 12828 /* If the shift was logical, then we must make the condition 12829 unsigned. */ 12830 if (GET_CODE (op0) == LSHIFTRT) 12831 code = unsigned_condition (code); 12832 12833 const_op = (unsigned HOST_WIDE_INT) const_op 12834 << INTVAL (XEXP (op0, 1)); 12835 if (low_bits != 0 12836 && (code == GT || code == GTU 12837 || code == LE || code == LEU)) 12838 const_op 12839 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1); 12840 op1 = GEN_INT (const_op); 12841 op0 = XEXP (op0, 0); 12842 continue; 12843 } 12844 } 12845 12846 /* If we are using this shift to extract just the sign bit, we 12847 can replace this with an LT or GE comparison. */ 12848 if (const_op == 0 12849 && (equality_comparison_p || sign_bit_comparison_p) 12850 && CONST_INT_P (XEXP (op0, 1)) 12851 && UINTVAL (XEXP (op0, 1)) == mode_width - 1) 12852 { 12853 op0 = XEXP (op0, 0); 12854 code = (code == NE || code == GT ? 
LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }

  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }

  /* We now do the opposite procedure: some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
	tmode = tmode_iter.require ();
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.
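	       For instance, if SImode has no compare pattern but DImode
	       does, (lt:SI X (const_int 0)) can instead be tested as
	       (ne (and:DI (subreg:DI X 0) (const_int 0x80000000))
	       (const_int 0)), which uses only the sign bit of X.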
*/

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }

	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));
		else
		  {
		    if (zero_extended)
		      {
			op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
						  op0, mode);
			op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
						  op1, mode);
		      }
		    else
		      {
			op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
						  op0, mode);
			op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
						  op1, mode);
		      }
		    break;
		  }
	      }
	  }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}

/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */
static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x0 == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x1 == XEXP (x0, 0) ?
			     XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}

/* Utility function for the following routine.  Called when X is part of a
   value being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c.  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}

/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.
*/

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}

/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.
*/

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle a SUBREG if it's
	 the low part, but we must be careful with paradoxical SUBREGs on
	 RISC architectures because we cannot strip e.g. an extension around
	 a load and record the naked load since the RTL middle-end considers
	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
			    BITS_PER_WORD)
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      WORD_REGISTER_OPERATIONS
			      && paradoxical_subreg_p (SET_DEST (setter))
			      ? SET_SRC (setter)
			      : gen_lowpart (GET_MODE (dest),
					     SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}

/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.
Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}

/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}

/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}

/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
	return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
        return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}

/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}

/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          }
    }
}
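/* Illustrative sketch only: on a target that promotes QImode variables to
   SImode registers, a promoted unsigned access such as

     (subreg/s/u:QI (reg:SI 100) 0)

   guarantees that the upper bits of register 100 are zero, so
   record_promoted_value narrows last_set_nonzero_bits for it to
   GET_MODE_MASK (QImode), i.e. 0xff.  */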
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                          (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                           regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpressions we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid, and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
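/* Illustrative sketch only: suppose the recorded value of a register is

     (ashiftrt:SI (reg:SI 100) (const_int 31))

   and register 100 has since been invalidated.  With REPLACE nonzero the
   value is rewritten to

     (ashiftrt:SI (clobber:SI (const_int 0)) (const_int 31))

   which still exposes the form of the value (every bit a copy of the sign
   bit, so always -1 or 0) even though its source register is unknown.  */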
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live at the
     beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
                GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}

/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
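/* Illustrative sketch only: note_stores walks every SET and CLOBBER at the
   top level of a pattern and invokes the callback on each destination, so
   reg_dead_at_p below effectively does

     reg_dead_flag = 0;
     note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);

   for each insn it scans; reg_dead_flag then encodes whether the register
   range of interest was clobbered (+1, dead) or set (-1, live) there.  */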
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}

/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
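/* Illustrative sketch only: assuming a 32-bit target where (reg:DI 0)
   occupies hard registers 0 and 1, add_to_hard_reg_set marks every hard
   register covered by the mode, so a use of (reg:SI 0) in a candidate
   pattern records only register 0 in newpat_used_regs, while a use of
   (reg:DI 0) records both 0 and 1.  */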
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}

/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
         FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
        {
          rtx_insn *insn = prev_real_nondebug_insn (to_insn);
          while (insn
                 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
                 && DF_INSN_LUID (insn) >= from_luid)
            {
              if (dead_or_set_regno_p (insn, regno))
                {
                  if (find_regno_note (insn, REG_DEAD, regno))
                    where_dead = insn;
                  break;
                }

              insn = prev_real_nondebug_insn (insn);
            }
        }

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && partial_subreg_p (GET_MODE (XEXP (note, 0)),
                                             GET_MODE (x))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some piece of
         every register in the expression is used by this insn, so
         remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && !read_modify_subreg_p (dest)))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
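/* Illustrative sketch only: when combining

     I2: (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 4)))
     I3: (set (reg:SI 102) (mult:SI (reg:SI 101) (reg:SI 103)))

   into a single insn at I3, if register 100 formerly died at I2,
   move_deaths removes the REG_DEAD note for it from I2 and queues a fresh
   one on *PNOTES, so that distribute_notes can attach it to the combined
   insn.  */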
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
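/* Illustrative sketch only: reg_bitfield_target_p answers whether a
   register is the target of a pattern such as

     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
          (reg:SI 101))

   For hard registers the final test is a standard interval-overlap check:
   [regno, endregno) and [tregno, endtregno) intersect iff
   endregno > tregno && regno < endtregno.  */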
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know
   will not need REG_DEAD notes because they are being substituted for.
   This saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              poly_int64 old_size, args_size = get_args_size (note);
              /* fixup_args_size_notes looks at the REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a
                 REG_ARGS_SIZE note to all noreturn calls; allow that here.  */
              gcc_assert (maybe_ne (old_size, args_size)
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
        case REG_CALL_NOCF_CHECK:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;
        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for registers which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }

          /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
             but we can't tell which at this point.  We must reset any
             expectations we had about the value that was previously
             stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
             and, if appropriate, restore its previous value, but we
             don't have enough information for that at this point.  */
          else
            {
              record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

              /* Otherwise, if this register is now referenced in i2
                 then the register used to be modified in one of the
                 original insns.  If it was i3 (say, in an unused
                 parallel), it's now completely gone, so the note can
                 be discarded.  But if it was modified in i2, i1 or i0
                 and we still reference it in i2, then we're
                 referencing the previous value, and since the
                 register was modified and REG_UNUSED, we know that
                 the previous value is now dead.  So, if we only
                 reference the register in i2, we change the note to
                 REG_DEAD, to reflect the previous value.  However, if
                 we're also setting or clobbering the register as
                 scratch, we know (because the register was not
                 referenced in i3) that it's unused, just as it was
                 unused before, and we place the note in i2.  */
              if (from_insn != i3 && i2 && INSN_P (i2)
                  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                {
                  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);
                  if (! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i2, REG_NOTE_KIND (note),
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i2, REG_NOTE_KIND (note),
                                          XEXP (note, 0))))
                    place = i2;
                }
            }

          break;
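        /* Illustrative note (sketch only): a REG_UNUSED note for some
           register that came from an old i2, where the new i2 still reads
           the register without setting it, is thus rewritten as a REG_DEAD
           note on i2, since the previous value demonstrably dies there.  */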
        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They
             must be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
              place2 = 0;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;
        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                {
                  /* If the new I2 sets the same register that is marked
                     dead in the note, we do not in general know where to
                     put the note.  One important case we _can_ handle is
                     when the note comes from I3.  */
                  if (from_insn == i3)
                    place = i3;
                  else
                    break;
                }
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is
                     all TEM_INSN is doing.  If so, delete TEM_INSN.
                     Otherwise, make this into a REG_UNUSED note instead.
                     Don't delete sets to global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;
                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not whether it is totally
             replaced, which is what `dead_or_set_p' checks, so also check
             for it being set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.]  */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }
                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}
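/* Illustrative sketch only: after a combination that rewrote

     I1: (set (reg:SI 100) ...)
     I3: (set (reg:SI 102) (plus:SI (reg:SI 100) (reg:SI 101)))

   the old LOG_LINK from I3 back to I1 must be re-homed.  distribute_links
   below walks forward from the setter looking for the first remaining use
   of register 100 in the same basic block and hangs the link there, if
   anywhere.  */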
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
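/* Illustrative sketch only: LOG_LINKS form a singly linked list per insn,
   so the duplicate check above is a plain list-membership test; the
   FOR_EACH_LOG_LINK iterator essentially expands to

     for (link2 = LOG_LINKS (place); link2; link2 = link2->next)

   making the loop equivalent to a hand-written search of the list.  */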
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}

DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}

/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}
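/* Illustrative note (sketch only): like other RTL passes, the combiner is
   instantiated through the factory function above; the pass manager owns
   the returned object and invokes gate () and execute () per function.  A
   caller would do, e.g.,

     rtl_opt_pass *pass = make_pass_combine (ctxt);

   with the actual scheduling of the pass determined elsewhere
   (passes.def).  */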