/* SSA Jump Threading
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Jeff Law <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"
#include "cfganal.h"
#include "alloc-pool.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *,
			     class avail_exprs_stack *,
			     basic_block);

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;
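  /* A sketch of the shape we hope to find (hypothetical GIMPLE, for
     illustration only): a block with multiple predecessors and multiple
     successors whose condition may be known along one incoming edge:

	 <bb 4>:
	 a_5 = PHI <0(2), 1(3)>
	 if (a_5 != 0)
	   goto <bb 5>;
	 else
	   goto <bb 6>;

     Reached via the edge 2->4 the condition is known to be false, so
     the edge 2->4 can be threaded straight to bb 6.  */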
  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES and EVRP_RANGE_ANALYZER.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e,
					 const_and_copies *const_and_copies,
					 evrp_range_analyzer *evrp_range_analyzer)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);

      /* Also update the value range associated with DST, using
	 the range from SRC.

	 Note that even if SRC is a constant we need to set a suitable
	 output range so that VR_UNDEFINED ranges do not leak through.  */
      if (evrp_range_analyzer)
	{
	  /* Get an empty new VR we can pass to update_value_range and save
	     away in the VR stack.  */
	  vr_values *vr_values = evrp_range_analyzer->get_vr_values ();
	  value_range *new_vr = vr_values->allocate_value_range ();
	  memset (new_vr, 0, sizeof (value_range));

	  /* There are three cases to consider:

	     First, if SRC is an SSA_NAME, then we can copy the value
	     range from SRC into NEW_VR.

	     Second, if SRC is an INTEGER_CST, then we can just set
	     NEW_VR to a singleton range.

	     Otherwise set NEW_VR to varying.  This may be overly
	     conservative.  */
	  if (TREE_CODE (src) == SSA_NAME)
	    copy_value_range (new_vr, vr_values->get_value_range (src));
	  else if (TREE_CODE (src) == INTEGER_CST)
	    set_value_range_to_value (new_vr, src, NULL);
	  else
	    set_value_range_to_varying (new_vr);

	  /* This is a temporary range for DST, so push it.  */
	  evrp_range_analyzer->push_value_range (dst, new_vr);
	}
    }
  return true;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}
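/* For example (hypothetical, illustration only): if traversing E
   recorded the temporary equivalence x_4 = 7, then valueizing the
   statement y_6 = x_4 + 1 through this hook allows
   gimple_fold_stmt_to_constant_1 to fold it to 8 without modifying
   the IL.  */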
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences into
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
    const_and_copies *const_and_copies,
    avail_exprs_stack *avail_exprs_stack,
    evrp_range_analyzer *evrp_range_analyzer,
    pfn_simplify simplify)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we can not thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	{
	  /* If any of the stmts in the PATH's dests are going to be
	     killed due to threading, grow the max count
	     accordingly.  */
	  if (max_stmt_count
	      == PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
	    {
	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
	      if (dump_file)
		fprintf (dump_file, "threading bb %i up to %i stmts\n",
			 e->dest->index, max_stmt_count);
	    }
	  /* If we're still past the limit, we're done.  */
	  if (stmt_count > max_stmt_count)
	    return NULL;
	}

      /* These are temporary ranges, do not reflect them back into
	 the global range data.  */
      if (evrp_range_analyzer)
	evrp_range_analyzer->record_ranges_from_stmt (stmt, true);

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;
      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	   if (x < 6)
	     goto l;
	   else
	     goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the block.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     Try to fold the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  /* ??? The DOM callback below can be changed to setting
	     the mprts_hook around the call to thread_across_edge,
	     avoiding the use substitution.  The VRP hook should be
	     changed to properly valueize operands itself using
	     SSA_NAME_VALUE in addition to its own lattice.  */
	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						       threadedge_valueize);
	  if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
	      && (!cached_lhs
		  || (TREE_CODE (cached_lhs) != SSA_NAME
		      && !is_gimple_min_invariant (cached_lhs))))
	    {
	      /* We're going to temporarily copy propagate the operands
		 and see if that allows us to simplify this statement.  */
	      tree *copy;
	      ssa_op_iter iter;
	      use_operand_p use_p;
	      unsigned int num, i = 0;

	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	      copy = XALLOCAVEC (tree, num);

	      /* Make a copy of the uses & vuses into COPY, then cprop into
		 the operands.  */
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		{
		  tree tmp = NULL;
		  tree use = USE_FROM_PTR (use_p);

		  copy[i++] = use;
		  if (TREE_CODE (use) == SSA_NAME)
		    tmp = SSA_NAME_VALUE (use);
		  if (tmp)
		    SET_USE (use_p, tmp);
		}

	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);

	      /* Restore the statement's original uses/defs.  */
	      i = 0;
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		SET_USE (use_p, copy[i++]);
	    }
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
						cached_lhs);
    }
  return stmt;
}
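/* A sketch of the walk above (hypothetical GIMPLE).  Suppose traversing
   E recorded the temporary equivalence a_1 = 5:

       <bb 4>:
       b_2 = a_1 + 1;	   folds to 6; record b_2 = 6
       if (b_2 > 3)	   now statically true

   All of these equivalences are context sensitive: they hold only when
   bb 4 is entered via E and are unwound by our callers.  */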
static tree simplify_control_stmt_condition_1 (edge, gimple *,
					       class avail_exprs_stack *,
					       tree, enum tree_code, tree,
					       gcond *, pfn_simplify,
					       unsigned);

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  When simplifying a GIMPLE_SWITCH, we may return
   the CASE_LABEL_EXPR that will be taken.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple *stmt,
				 class avail_exprs_stack *avail_exprs_stack,
				 gcond *dummy_cond,
				 pfn_simplify simplify)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      const unsigned recursion_limit = 4;

      cached_lhs
	= simplify_control_stmt_condition_1 (e, stmt, avail_exprs_stack,
					     op0, cond_code, op1,
					     dummy_cond, simplify,
					     recursion_limit);

      /* If we were testing an integer/pointer against a constant, then
	 we can use the FSM code to trace the value of the SSA_NAME.  If
	 a value is found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths found by the FSM
	     optimizer.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to
     handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	{
	  if (code == GIMPLE_SWITCH)
	    {
	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
		 we found before handing off to VRP.  If simplification is
		 possible, the simplified value will be a CASE_LABEL_EXPR of
		 the label that is proven to be taken.  */
	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
	      gimple_switch_set_index (dummy_switch, cached_lhs);
	      cached_lhs = (*simplify) (dummy_switch, stmt,
					avail_exprs_stack, e->src);
	      ggc_free (dummy_switch);
	    }
	  else
	    cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);
	}

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
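/* For instance (hypothetical): given "switch (i_2)" where the
   equivalence chains prove i_2 = 3, the dummy switch built above has
   its index operand replaced by 3 and the callback may return the
   CASE_LABEL_EXPR for "case 3:", identifying the outgoing edge to
   thread to.  */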
/* Recursive helper for simplify_control_stmt_condition.  */

static tree
simplify_control_stmt_condition_1 (edge e,
				   gimple *stmt,
				   class avail_exprs_stack *avail_exprs_stack,
				   tree op0,
				   enum tree_code cond_code,
				   tree op1,
				   gcond *dummy_cond,
				   pfn_simplify simplify,
				   unsigned limit)
{
  if (limit == 0)
    return NULL_TREE;

  /* We may need to canonicalize the comparison.  For
     example, op0 might be a constant while op1 is an
     SSA_NAME.  Failure to canonicalize will cause us to
     miss threading opportunities.  */
  if (tree_swap_operands_p (op0, op1))
    {
      cond_code = swap_tree_comparison (cond_code);
      std::swap (op0, op1);
    }

  /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
     recurse into the LHS to see if there is a dominating ASSERT_EXPR
     of A or of B that makes this condition always true or always false
     along the edge E.  */
  if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && integer_zerop (op1))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
	       || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
	{
	  enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
	  const tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  const tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  /* Is A != 0 ?  */
	  const tree res1
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 avail_exprs_stack,
						 rhs1, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res1 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
	    {
	      /* If A == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If A == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
	    {
	      /* If A != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If A != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  /* Is B != 0 ?  */
	  const tree res2
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 avail_exprs_stack,
						 rhs2, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res2 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
	    {
	      /* If B == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If B == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
	    {
	      /* If B != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If B != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  if (res1 != NULL_TREE && res2 != NULL_TREE)
	    {
	      if (rhs_code == BIT_AND_EXPR
		  && TYPE_PRECISION (TREE_TYPE (op0)) == 1
		  && integer_nonzerop (res1)
		  && integer_nonzerop (res2))
		{
		  /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true.  */
		  if (cond_code == NE_EXPR)
		    return boolean_true_node;
		  /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_false_node;
		}

	      if (rhs_code == BIT_IOR_EXPR
		  && integer_zerop (res1)
		  && integer_zerop (res2))
		{
		  /* If A == 0 and B == 0 then (A | B) != 0 is false.  */
		  if (cond_code == NE_EXPR)
		    return boolean_false_node;
		  /* If A == 0 and B == 0 then (A | B) == 0 is true.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_true_node;
		}
	    }
	}
      /* Handle (A CMP B) CMP 0.  */
      else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
	       == tcc_comparison)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  tree_code new_cond = gimple_assign_rhs_code (def_stmt);
	  if (cond_code == EQ_EXPR)
	    new_cond = invert_tree_comparison (new_cond, false);

	  tree res
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 avail_exprs_stack,
						 rhs1, new_cond, rhs2,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res != NULL_TREE && is_gimple_min_invariant (res))
	    return res;
	}
    }

  gimple_cond_set_code (dummy_cond, cond_code);
  gimple_cond_set_lhs (dummy_cond, op0);
  gimple_cond_set_rhs (dummy_cond, op1);

  /* We absolutely do not care about any type conversions; we only
     care about a zero/nonzero value.  */
  fold_defer_overflow_warnings ();

  tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
  if (res)
    while (CONVERT_EXPR_P (res))
      res = TREE_OPERAND (res, 0);

  fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

  /* If we have not simplified the condition down to an invariant,
     then use the pass specific callback to simplify the condition.  */
  if (!res
      || !is_gimple_min_invariant (res))
    res = (*simplify) (dummy_cond, stmt, avail_exprs_stack, e->src);

  return res;
}
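/* Illustration of the recursion above (hypothetical GIMPLE): given

       x_3 = a_2 & b_4;
       if (x_3 != 0)

   proving a_2 == 0 along E makes (a_2 & b_4) != 0 false regardless of
   b_4, so boolean_false_node is returned without examining B.  */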
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_BIND_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else if (gimple_debug_nonbind_marker_p (stmt))
	    continue;
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts.  We
	     want to copy binds for all modified variables, so that we
	     retain a bind to the shared def if there is one, or to a
	     newly introduced PHI node if there is one.  Our bind will
	     end up reset if the value is dead, but that implies the
	     variable couldn't have survived, so it's fine.  We are
	     not actually running the code that performed the binds at
	     this point, we're just adding binds so that they survive
	     the new confluence, so markers should not be copied.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects
   (ie, it need not be duplicated as part of the CFG/SSA updating
   process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, SIMPLIFY are used to try and simplify the condition at the
   end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    class avail_exprs_stack *avail_exprs_stack,
			    pfn_simplify simplify,
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 avail_exprs_stack,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }
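  /* Sketch of the recursion just above (hypothetical CFG): a chain of
     empty forwarder blocks is consumed one edge at a time,

	 taken_edge -> <bb 5> (empty) -> <bb 6> (empty) -> ...

     each pushed as EDGE_NO_COPY_SRC_BLOCK since such blocks need not
     be duplicated, until we reach a block with real statements or a
     block we have already visited.  */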
  /* The only real statement this block can have is a control flow
     altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
					  avail_exprs_stack, dummy_cond,
					  simplify);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond != NULL_TREE
      && (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
    {
      if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	taken_edge = find_edge (bb, label_to_block (CASE_LABEL (cond)));
      else
	taken_edge = find_taken_edge (bb, cond);

      if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  avail_exprs_stack,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     const_and_copies *const_and_copies,
			     avail_exprs_stack *avail_exprs_stack,
			     evrp_range_analyzer *evrp_range_analyzer,
			     pfn_simplify simplify,
			     vec<jump_thread_edge *> *path,
			     bitmap visited)
{
  /* We want to record any equivalences created by traversing E.  */
  record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies,
						evrp_range_analyzer))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
							avail_exprs_stack,
							evrp_range_analyzer,
							simplify);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
					      dummy_cond, simplify);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
	{
	  edge taken_edge;
	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	    taken_edge = find_edge (e->dest,
				    label_to_block (CASE_LABEL (cond)));
	  else
	    taken_edge = find_taken_edge (e->dest, cond);

	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      avail_exprs_stack,
				      simplify,
				      visited,
				      path);
	  return 1;
	}
    }
  return 0;
}
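/* Putting the pieces together (hypothetical example): for E = 2->4
   where bb 4 ends in "if (a_5 != 0)" and the temporary equivalences
   prove a_5 == 0, the recorded path is

       2->4 (EDGE_START_JUMP_THREAD), 4->6 (EDGE_COPY_SRC_BLOCK)

   possibly extended further by thread_around_empty_blocks.  */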
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

static void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    class const_and_copies *const_and_copies,
		    class avail_exprs_stack *avail_exprs_stack,
		    class evrp_range_analyzer *evrp_range_analyzer,
		    pfn_simplify simplify)
{
  bitmap visited = BITMAP_ALLOC (NULL);

  const_and_copies->push_marker ();
  avail_exprs_stack->push_marker ();
  if (evrp_range_analyzer)
    evrp_range_analyzer->push_marker ();

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  int threaded;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (e, dummy_cond,
					    const_and_copies,
					    avail_exprs_stack,
					    evrp_range_analyzer,
					    simplify, path,
					    visited);
  else
    threaded = 0;

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      avail_exprs_stack->pop_to_marker ();
      if (evrp_range_analyzer)
	evrp_range_analyzer->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  const_and_copies->pop_to_marker ();
	  avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  return;
	}
    }

  /* We were unable to determine which outgoing edge from E->dest is
     taken.  However, we might still be able to thread through successors
     of E->dest.  This often occurs when E->dest is a joiner block which
     then fans back out based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  const_and_copies->pop_to_marker ();
	  avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  BITMAP_FREE (visited);
	  return;
	}
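    /* Joiner sketch (hypothetical CFG): E->dest joins several paths and
       fans back out on a test that is redundant along E:

	     2       3
	      \     /
	      <bb 4>	     copied as EDGE_COPY_SRC_JOINER_BLOCK
	      /     \
	  <bb 5>   <bb 6>

       Within the copy of bb 4 made for the path starting at E, one of
       the outgoing edges may become statically known.  */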
    /* Look at each successor of E->dest to see if we can thread through
       it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  continue;

	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	const_and_copies->push_marker ();
	avail_exprs_stack->push_marker ();
	if (evrp_range_analyzer)
	  evrp_range_analyzer->push_marker ();

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    avail_exprs_stack,
					    simplify,
					    visited,
					    path);

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       const_and_copies,
					       avail_exprs_stack,
					       evrp_range_analyzer,
					       simplify, path,
					       visited) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  delete_jump_thread_path (path);

	/* And unwind the equivalence table.  */
	if (evrp_range_analyzer)
	  evrp_range_analyzer->pop_to_marker ();
	avail_exprs_stack->pop_to_marker ();
	const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  if (evrp_range_analyzer)
    evrp_range_analyzer->pop_to_marker ();
  const_and_copies->pop_to_marker ();
  avail_exprs_stack->pop_to_marker ();
}

/* Examine the outgoing edges from BB and conditionally
   try to thread them.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_outgoing_edges (basic_block bb, gcond *dummy_cond,
		       class const_and_copies *const_and_copies,
		       class avail_exprs_stack *avail_exprs_stack,
		       class evrp_range_analyzer *evrp_range_analyzer,
		       tree (*simplify) (gimple *, gimple *,
					 class avail_exprs_stack *,
					 basic_block))
{
  int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
  gimple *last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
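  /* Two shapes are handled below (a summary, not new behavior): BB is
     a forwarder with a single normal successor edge, or BB ends in a
     two-way GIMPLE_COND with two normal successor edges.  In both
     cases we only start a thread when the edge's destination looks
     potentially threadable.  */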
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & flags) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (dummy_cond, single_succ_edge (bb),
			  const_and_copies, avail_exprs_stack,
			  evrp_range_analyzer, simplify);
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & flags) == 0
	   && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (dummy_cond, true_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (dummy_cond, false_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);
    }
}