/* SSA Jump Threading
   Copyright (C) 2005-2021 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"
#include "cfganal.h"
#include "alloc-pool.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *,
			     class avail_exprs_stack *,
			     basic_block);

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
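
  /* For instance (an illustrative sketch, not from any particular
     testcase): a block such as

       <bb 4>:
       # i_1 = PHI <0(2), n_6(3)>
       goto <bb 5>;		; <bb 5> is the loop header

     has no executable statements, but threading through it may let a
     path entering from block 2 or 3 reach a known arm of a test
     further along.  */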
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES and EVRP_RANGE_DATA.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e,
    const_and_copies *const_and_copies,
    evrp_range_analyzer *evrp_range_analyzer)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we cannot thread
	 through E->dest.  */
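      /* For instance (illustrative only):

	   # src_2 = PHI <a_5(3), b_6(4)>
	   # dst_1 = PHI <src_2(E), c_7(4)>

	 PHI nodes evaluate their arguments in parallel on entry to the
	 block, so the SRC_2 feeding DST_1 is SRC_2's value on the
	 incoming edge, not the result computed by the first PHI; a
	 simple DST_1 = SRC_2 equivalence would conflate the two.  */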
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);

      /* Also update the value range associated with DST, using
	 the range from SRC.

	 Note that even if SRC is a constant we need to set a suitable
	 output range so that VR_UNDEFINED ranges do not leak through.  */
      if (evrp_range_analyzer)
	{
	  /* Get an empty new VR we can pass to update_value_range and save
	     away in the VR stack.  */
	  value_range_equiv *new_vr
			  = evrp_range_analyzer->allocate_value_range_equiv ();
	  new (new_vr) value_range_equiv ();

	  /* There are three cases to consider:

	       First if SRC is an SSA_NAME, then we can copy the value
	       range from SRC into NEW_VR.

	       Second if SRC is an INTEGER_CST, then we can just set
	       NEW_VR to a singleton range.

	       Otherwise set NEW_VR to varying.  This may be overly
	       conservative.  */
	  if (TREE_CODE (src) == SSA_NAME)
	    new_vr->deep_copy (evrp_range_analyzer->get_value_range (src));
	  else if (TREE_CODE (src) == INTEGER_CST)
	    new_vr->set (src);
	  else
	    new_vr->set_varying (TREE_TYPE (src));

	  /* This is a temporary range for DST, so push it.  */
	  evrp_range_analyzer->push_value_range (dst, new_vr);
	}
    }
  return true;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
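
/* For example (an illustrative sketch): if traversing E tells us
   a_1 == 0, then the statement

     b_2 = a_1 + 5;

   in E->dest folds to the invariant 5, and recording the context
   sensitive equivalence b_2 == 5 may in turn let us resolve a later
   "if (b_2 == 5)".  */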

static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
    const_and_copies *const_and_copies,
    avail_exprs_stack *avail_exprs_stack,
    evrp_range_analyzer *evrp_range_analyzer,
    pfn_simplify simplify)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = param_max_jump_thread_duplication_stmts;

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 cannot thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we cannot thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* We cannot thread through __builtin_constant_p, because an
	 expression that is constant on two threading paths may become
	 non-constant (i.e.: phi) when they merge.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	{
	  /* If any of the stmts in the PATH's dests are going to be
	     killed due to threading, grow the max count
	     accordingly.  */
	  if (max_stmt_count
	      == param_max_jump_thread_duplication_stmts)
	    {
	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
	      if (dump_file)
		fprintf (dump_file, "threading bb %i up to %i stmts\n",
			 e->dest->index, max_stmt_count);
	    }
	  /* If we're still past the limit, we're done.  */
	  if (stmt_count > max_stmt_count)
	    return NULL;
	}

      /* These are temporary ranges; do not reflect them back into
	 the global range data.  */
      if (evrp_range_analyzer)
	evrp_range_analyzer->record_ranges_from_stmt (stmt, true);

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node. Temporarily using only one edge produces invalid
	 results. For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes. If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     Try to fold the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  /* ???  The DOM callback below can be changed to setting
	     the mprts_hook around the call to thread_across_edge,
	     avoiding the use substitution.  The VRP hook should be
	     changed to properly valueize operands itself using
	     SSA_NAME_VALUE in addition to its own lattice.  */
	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						       threadedge_valueize);
          if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
	      && (!cached_lhs
                  || (TREE_CODE (cached_lhs) != SSA_NAME
                      && !is_gimple_min_invariant (cached_lhs))))
	    {
	      /* We're going to temporarily copy propagate the operands
		 and see if that allows us to simplify this statement.  */
	      tree *copy;
	      ssa_op_iter iter;
	      use_operand_p use_p;
	      unsigned int num, i = 0;

	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	      copy = XALLOCAVEC (tree, num);

	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
		 the operands.  */
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		{
		  tree tmp = NULL;
		  tree use = USE_FROM_PTR (use_p);

		  copy[i++] = use;
		  if (TREE_CODE (use) == SSA_NAME)
		    tmp = SSA_NAME_VALUE (use);
		  if (tmp)
		    SET_USE (use_p, tmp);
		}

	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);

	      /* Restore the statement's original uses/defs.  */
	      i = 0;
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		SET_USE (use_p, copy[i++]);
	    }
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
						cached_lhs);
    }
  return stmt;
}

static tree simplify_control_stmt_condition_1 (edge, gimple *,
					       class avail_exprs_stack *,
					       tree, enum tree_code, tree,
					       gcond *, pfn_simplify,
					       unsigned);

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  When simplifying a GIMPLE_SWITCH, we may return
   the CASE_LABEL_EXPR that will be taken.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple *stmt,
				 class avail_exprs_stack *avail_exprs_stack,
				 gcond *dummy_cond,
				 pfn_simplify simplify)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      const unsigned recursion_limit = 4;

      cached_lhs
	= simplify_control_stmt_condition_1 (e, stmt, avail_exprs_stack,
					     op0, cond_code, op1,
					     dummy_cond, simplify,
					     recursion_limit);

      /* If we were testing an integer/pointer against a constant, then
	 we can use the FSM code to trace the value of the SSA_NAME.  If
	 a value is found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths found by the FSM optimizer.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
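      /* For instance (illustrative only): when threading a loop
	 backedge we may have recorded both x_1 == y_2 and y_2 == x_1,
	 so chasing SSA_NAME_VALUE transitively could cycle forever;
	 the loop below therefore takes at most two steps.  */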
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	{
	  if (code == GIMPLE_SWITCH)
	    {
	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
		 we found before handing off to VRP.  If simplification is
	         possible, the simplified value will be a CASE_LABEL_EXPR of
		 the label that is proven to be taken.  */
	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
	      gimple_switch_set_index (dummy_switch, cached_lhs);
	      cached_lhs = (*simplify) (dummy_switch, stmt,
					avail_exprs_stack, e->src);
	      ggc_free (dummy_switch);
	    }
	  else
	    cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);
	}

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* Recursive helper for simplify_control_stmt_condition.  */

static tree
simplify_control_stmt_condition_1 (edge e,
				   gimple *stmt,
				   class avail_exprs_stack *avail_exprs_stack,
				   tree op0,
				   enum tree_code cond_code,
				   tree op1,
				   gcond *dummy_cond,
				   pfn_simplify simplify,
				   unsigned limit)
{
  if (limit == 0)
    return NULL_TREE;

  /* We may need to canonicalize the comparison.  For
     example, op0 might be a constant while op1 is an
     SSA_NAME.  Failure to canonicalize will cause us to
     miss threading opportunities.  */
  if (tree_swap_operands_p (op0, op1))
    {
      cond_code = swap_tree_comparison (cond_code);
      std::swap (op0, op1);
    }

  /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
     recurse into the LHS to see if there is a dominating ASSERT_EXPR
     of A or of B that makes this condition always true or always false
     along the edge E.  */
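  /* For instance (an illustrative sketch):

       op0_1 = a_2 & b_3;
       if (op0_1 == 0)

     If recursing on "a_2 != 0" yields false, i.e. a_2 is zero along E,
     then the whole condition is known to be true no matter what b_3
     is.  */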
  if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && integer_zerop (op1))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
        ;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
	       || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
	{
	  enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
	  const tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  const tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  /* Is A != 0 ?  */
	  const tree res1
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs1, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res1 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
	    {
	      /* If A == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
	        return boolean_false_node;
	      /* If A == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
	    {
	      /* If A != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If A != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  /* Is B != 0 ?  */
	  const tree res2
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs2, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res2 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
	    {
	      /* If B == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
	        return boolean_false_node;
	      /* If B == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
	    {
	      /* If B != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If B != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  if (res1 != NULL_TREE && res2 != NULL_TREE)
	    {
	      if (rhs_code == BIT_AND_EXPR
		  && TYPE_PRECISION (TREE_TYPE (op0)) == 1
		  && integer_nonzerop (res1)
		  && integer_nonzerop (res2))
		{
		  /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true.  */
		  if (cond_code == NE_EXPR)
		    return boolean_true_node;
		  /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_false_node;
		}

	      if (rhs_code == BIT_IOR_EXPR
		  && integer_zerop (res1)
		  && integer_zerop (res2))
		{
		  /* If A == 0 and B == 0 then (A | B) != 0 is false.  */
		  if (cond_code == NE_EXPR)
		    return boolean_false_node;
		  /* If A == 0 and B == 0 then (A | B) == 0 is true.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_true_node;
		}
	    }
	}
      /* Handle (A CMP B) CMP 0.  */
      else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
	       == tcc_comparison)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  tree_code new_cond = gimple_assign_rhs_code (def_stmt);
	  if (cond_code == EQ_EXPR)
	    new_cond = invert_tree_comparison (new_cond, false);

	  tree res
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs1, new_cond, rhs2,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res != NULL_TREE && is_gimple_min_invariant (res))
	    return res;
	}
    }

  gimple_cond_set_code (dummy_cond, cond_code);
  gimple_cond_set_lhs (dummy_cond, op0);
  gimple_cond_set_rhs (dummy_cond, op1);

  /* We absolutely do not care about any type conversions;
     we only care about a zero/nonzero value.  */
  fold_defer_overflow_warnings ();

  tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
  if (res)
    while (CONVERT_EXPR_P (res))
      res = TREE_OPERAND (res, 0);

  fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

  /* If we have not simplified the condition down to an invariant,
     then use the pass specific callback to simplify the condition.  */
  if (!res
      || !is_gimple_min_invariant (res))
    res = (*simplify) (dummy_cond, stmt, avail_exprs_stack, e->src);

  return res;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_BIND_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else if (gimple_debug_nonbind_marker_p (stmt))
	    continue;
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts.  We
	     want to copy binds for all modified variables, so that we
	     retain a bind to the shared def if there is one, or to a
	     newly introduced PHI node if there is one.  Our bind will
	     end up reset if the value is dead, but that implies the
	     variable couldn't have survived, so it's fine.  We are
	     not actually running the code that performed the binds at
	     this point, we're just adding binds so that they survive
	     the new confluence, so markers should not be copied.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, SIMPLIFY are used to try and simplify the condition at the
   end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    class avail_exprs_stack *avail_exprs_stack,
			    pfn_simplify simplify,
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they cannot have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
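
  /* For instance (illustrative only): extending the path across

       <bb 6>:		; empty, single successor
       goto <bb 7>;

     only pays off if bb 7 or some block beyond it resolves its
     conditional; the recursive call below reports whether that
     happened.  */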
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 avail_exprs_stack,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
					  avail_exprs_stack, dummy_cond,
					  simplify);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond != NULL_TREE
      && (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
    {
      if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
      else
	taken_edge = find_taken_edge (bb, cond);

      if (!taken_edge
	  || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  avail_exprs_stack,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value indicates success.  A zero return value
   indicates failure, though the block can still be duplicated as a
   joiner in a jump thread path; a negative value indicates the block
   should not be duplicated and thus is not suitable for a joiner in a
   jump threading path.  */
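
/* For instance (an illustrative sketch of the back edge hazard): in

     <bb 3>:
     # i_1 = PHI <0(2), i_7(4)>
     ...
     if (i_7 < n_5) goto <bb 4>; else goto <bb 6>;

   equivalences recorded for bb 3 when it was first reached describe
   entry via the preheader; naively reusing them while traversing the
   backedge 4->3 could wrongly collapse the exit test.  */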

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     const_and_copies *const_and_copies,
			     avail_exprs_stack *avail_exprs_stack,
			     evrp_range_analyzer *evrp_range_analyzer,
			     pfn_simplify simplify,
			     vec<jump_thread_edge *> *path,
			     bitmap visited)
{
  /* We want to record any equivalences created by traversing E.  */
  record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies,
					        evrp_range_analyzer))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
							avail_exprs_stack,
							evrp_range_analyzer,
							simplify);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
					      dummy_cond, simplify);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
	{
	  edge taken_edge;
	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	    taken_edge = find_edge (e->dest,
				    label_to_block (cfun, CASE_LABEL (cond)));
	  else
	    taken_edge = find_taken_edge (e->dest, cond);

	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
              jump_thread_edge *x
	        = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      avail_exprs_stack,
				      simplify,
				      visited,
				      path);
	  return 1;
	}
    }
  return 0;
}

/* There are basic blocks that look like:
   <P0>
   p0 = a CMP b ; or p0 = (INT) (a CMP b)
   goto <X>;

   <P1>
   p1 = c CMP d
   goto <X>;

   <X>
   # phi = PHI <p0 (P0), p1 (P1)>
   if (phi != 0) goto <Y>; else goto <Z>;

   Then edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
   and edges (X,Y) and (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.

   Return true if E is (P0,X) or (P1,X).  */

bool
edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
{
  /* See if there is only one stmt which is gcond.  */
  gcond *gs;
  if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
    return false;

  /* See if gcond's cond is "(phi !=/== 0/1)" in the basic block.  */
  tree cond = gimple_cond_lhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);
  if (TREE_CODE (cond) != SSA_NAME
      || (code != NE_EXPR && code != EQ_EXPR)
      || (!integer_onep (rhs) && !integer_zerop (rhs)))
    return false;
  gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
  if (phi == NULL || gimple_bb (phi) != e->dest)
    return false;

  /* Check if phi's incoming value is CMP.  */
  gassign *def;
  tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
  if (TREE_CODE (value) != SSA_NAME
      || !has_single_use (value)
      || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
    return false;

  /* Or if it is (INT) (a CMP b).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
    {
      value = gimple_assign_rhs1 (def);
      if (TREE_CODE (value) != SSA_NAME
	  || !has_single_use (value)
	  || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
	return false;
    }

  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
    return false;

  return true;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

static void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    class const_and_copies *const_and_copies,
		    class avail_exprs_stack *avail_exprs_stack,
		    class evrp_range_analyzer *evrp_range_analyzer,
		    pfn_simplify simplify)
{
  bitmap visited = BITMAP_ALLOC (NULL);

  const_and_copies->push_marker ();
  avail_exprs_stack->push_marker ();
  if (evrp_range_analyzer)
    evrp_range_analyzer->push_marker ();

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  int threaded;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (e, dummy_cond,
					    const_and_copies,
					    avail_exprs_stack,
					    evrp_range_analyzer,
					    simplify, path,
					    visited);
  else
    threaded = 0;

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      avail_exprs_stack->pop_to_marker ();
      if (evrp_range_analyzer)
	evrp_range_analyzer->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  const_and_copies->pop_to_marker ();
          avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  return;
	}
    }

 /* We were unable to determine which outgoing edge from E->dest is taken.
    However, we might still be able to thread through successors of
    E->dest.  This often occurs when E->dest is a joiner block which
    then fans back out based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
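
  /* For example (an illustrative sketch):

	 A   B
	  \ /
	   J		; E->dest: a joiner with side effects
	  / \
	 C   D

     J's conditional may be unknowable in general yet known when J is
     entered from A specifically; copying J for the A->J edge lets the
     copy jump straight to C or D.  */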
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_COMPLEX)
	{
	  const_and_copies->pop_to_marker ();
          avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  continue;

	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	const_and_copies->push_marker ();
	avail_exprs_stack->push_marker ();
	if (evrp_range_analyzer)
	  evrp_range_analyzer->push_marker ();

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    avail_exprs_stack,
					    simplify,
					    visited,
					    path);

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       const_and_copies,
					       avail_exprs_stack,
					       evrp_range_analyzer,
					       simplify, path,
					       visited) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found
	    || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
	  {
	    if (taken_edge->dest != path->last ()->e->dest)
	      propagate_threaded_block_debug_into (path->last ()->e->dest,
						   taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  delete_jump_thread_path (path);

	/* And unwind the equivalence table.  */
	if (evrp_range_analyzer)
	  evrp_range_analyzer->pop_to_marker ();
	avail_exprs_stack->pop_to_marker ();
	const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  if (evrp_range_analyzer)
    evrp_range_analyzer->pop_to_marker ();
  const_and_copies->pop_to_marker ();
  avail_exprs_stack->pop_to_marker ();
}

/* Examine the outgoing edges from BB and conditionally
   try to thread them.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_outgoing_edges (basic_block bb, gcond *dummy_cond,
		       class const_and_copies *const_and_copies,
		       class avail_exprs_stack *avail_exprs_stack,
		       class evrp_range_analyzer *evrp_range_analyzer,
		       tree (*simplify) (gimple *, gimple *,
					 class avail_exprs_stack *,
					 basic_block))
{
  int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
  gimple *last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & flags) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (dummy_cond, single_succ_edge (bb),
			  const_and_copies, avail_exprs_stack,
			  evrp_range_analyzer, simplify);
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & flags) == 0
	   && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (dummy_cond, true_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (dummy_cond, false_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);
    }
}