1 /* SSA Dominator optimizations for trees
2    Copyright (C) 2001-2018 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "cfgloop.h"
33 #include "gimple-fold.h"
34 #include "tree-eh.h"
35 #include "tree-inline.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-into-ssa.h"
39 #include "domwalk.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-ssa-threadupdate.h"
42 #include "params.h"
43 #include "tree-ssa-scopedtables.h"
44 #include "tree-ssa-threadedge.h"
45 #include "tree-ssa-dom.h"
46 #include "gimplify.h"
47 #include "tree-cfgcleanup.h"
48 #include "dbgcnt.h"
49 #include "alloc-pool.h"
50 #include "tree-vrp.h"
51 #include "vr-values.h"
52 #include "gimple-ssa-evrp-analyze.h"
53 
54 /* This file implements optimizations on the dominator tree.  */
55 
56 /* Structure for recording edge equivalences.
57 
58    Computing and storing the edge equivalences instead of creating
59    them on-demand can save significant amounts of time, particularly
60    for pathological cases involving switch statements.
61 
62    These structures live for a single iteration of the dominator
63    optimizer in the edge's AUX field.  At the end of an iteration we
64    free each of these structures.  */
65 class edge_info
66 {
67  public:
68   typedef std::pair <tree, tree> equiv_pair;
69   edge_info (edge);
70   ~edge_info ();
71 
72   /* Record a simple LHS = RHS equivalence.  This may trigger
73      calls to derive_equivalences.  */
74   void record_simple_equiv (tree, tree);
75 
76   /* If traversing this edge creates simple equivalences, we store
77      them as LHS/RHS pairs within this vector.  */
78   vec<equiv_pair> simple_equivalences;
79 
80   /* Traversing an edge may also indicate one or more particular conditions
81      are true or false.  */
82   vec<cond_equivalence> cond_equivalences;
83 
84  private:
85   /* Derive equivalences by walking the use-def chains.  */
86   void derive_equivalences (tree, tree, int);
87 };
88 
89 /* Track whether or not we have changed the control flow graph.  */
90 static bool cfg_altered;
91 
92 /* Bitmap of blocks that have had EH statements cleaned.  We should
93    remove their dead edges eventually.  */
94 static bitmap need_eh_cleanup;
95 static vec<gimple *> need_noreturn_fixup;
96 
97 /* Statistics for dominator optimizations.  */
98 struct opt_stats_d
99 {
100   long num_stmts;
101   long num_exprs_considered;
102   long num_re;
103   long num_const_prop;
104   long num_copy_prop;
105 };
106 
107 static struct opt_stats_d opt_stats;
108 
109 /* Local functions.  */
110 static void record_equality (tree, tree, class const_and_copies *);
111 static void record_equivalences_from_phis (basic_block);
112 static void record_equivalences_from_incoming_edge (basic_block,
113 						    class const_and_copies *,
114 						    class avail_exprs_stack *);
115 static void eliminate_redundant_computations (gimple_stmt_iterator *,
116 					      class const_and_copies *,
117 					      class avail_exprs_stack *);
118 static void record_equivalences_from_stmt (gimple *, int,
119 					   class avail_exprs_stack *);
120 static void dump_dominator_optimization_stats (FILE *file,
121 					       hash_table<expr_elt_hasher> *);
122 
123 /* Constructor for EDGE_INFO.  An EDGE_INFO instance is always
124    associated with an edge E.  */
125 
126 edge_info::edge_info (edge e)
127 {
128   /* Free the old one associated with E, if it exists and
129      associate our new object with E.  */
130   free_dom_edge_info (e);
131   e->aux = this;
132 
133   /* And initialize the embedded vectors.  */
134   simple_equivalences = vNULL;
135   cond_equivalences = vNULL;
136 }
137 
138 /* Destructor just needs to release the vectors.  */
139 
140 edge_info::~edge_info (void)
141 {
142   this->cond_equivalences.release ();
143   this->simple_equivalences.release ();
144 }
145 
146 /* NAME is known to have the value VALUE, which must be a constant.
147 
148    Walk through its use-def chain to see if there are other equivalences
149    we might be able to derive.
150 
151    RECURSION_LIMIT controls how far back we recurse through the use-def
152    chains.  */
153 
154 void
155 edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
156 {
157   if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
158     return;
159 
160   /* This records the equivalence for the toplevel object.  Do
161      this before checking the recursion limit.  */
162   simple_equivalences.safe_push (equiv_pair (name, value));
163 
164   /* Limit how far up the use-def chains we are willing to walk.  */
165   if (recursion_limit == 0)
166     return;
167 
168   /* We can walk up the use-def chains to potentially find more
169      equivalences.  */
170   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
171   if (is_gimple_assign (def_stmt))
172     {
173       /* We know the result of DEF_STMT was zero.  See if that allows
174 	 us to deduce anything about the SSA_NAMEs used on the RHS.  */
175       enum tree_code code = gimple_assign_rhs_code (def_stmt);
176       switch (code)
177 	{
178 	case BIT_IOR_EXPR:
179 	  if (integer_zerop (value))
180 	    {
181 	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
182 	      tree rhs2 = gimple_assign_rhs2 (def_stmt);
183 
184 	      value = build_zero_cst (TREE_TYPE (rhs1));
185 	      derive_equivalences (rhs1, value, recursion_limit - 1);
186 	      value = build_zero_cst (TREE_TYPE (rhs2));
187 	      derive_equivalences (rhs2, value, recursion_limit - 1);
188 	    }
189 	  break;
190 
191       /* We know the result of DEF_STMT was one.  See if that allows
192 	 us to deduce anything about the SSA_NAMEs used on the RHS.  */
193 	case BIT_AND_EXPR:
194 	  if (!integer_zerop (value))
195 	    {
196 	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
197 	      tree rhs2 = gimple_assign_rhs2 (def_stmt);
198 
199 	      /* If either operand has a boolean range, then we
200 		 know its value must be one, otherwise we just know it
201 		 is nonzero.  The former is clearly useful, I haven't
202 		 seen cases where the latter is helpful yet.  */
203 	      if (TREE_CODE (rhs1) == SSA_NAME)
204 		{
205 		  if (ssa_name_has_boolean_range (rhs1))
206 		    {
207 		      value = build_one_cst (TREE_TYPE (rhs1));
208 		      derive_equivalences (rhs1, value, recursion_limit - 1);
209 		    }
210 		}
211 	      if (TREE_CODE (rhs2) == SSA_NAME)
212 		{
213 		  if (ssa_name_has_boolean_range (rhs2))
214 		    {
215 		      value = build_one_cst (TREE_TYPE (rhs2));
216 		      derive_equivalences (rhs2, value, recursion_limit - 1);
217 		    }
218 		}
219 	    }
220 	  break;
221 
222 	/* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
223 	   set via a widening type conversion, then we may be able to record
224 	   additional equivalences.  */
225 	case NOP_EXPR:
226 	case CONVERT_EXPR:
227 	  {
228 	    tree rhs = gimple_assign_rhs1 (def_stmt);
229 	    tree rhs_type = TREE_TYPE (rhs);
230 	    if (INTEGRAL_TYPE_P (rhs_type)
231 		&& (TYPE_PRECISION (TREE_TYPE (name))
232 		    >= TYPE_PRECISION (rhs_type))
233 		&& int_fits_type_p (value, rhs_type))
234 	      derive_equivalences (rhs,
235 				   fold_convert (rhs_type, value),
236 				   recursion_limit - 1);
237 	    break;
238 	  }
239 
240 	/* We can invert the operation of these codes trivially if
241 	   one of the RHS operands is a constant to produce a known
242 	   value for the other RHS operand.  */
243 	case POINTER_PLUS_EXPR:
244 	case PLUS_EXPR:
245 	  {
246 	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
247 	    tree rhs2 = gimple_assign_rhs2 (def_stmt);
248 
249 	    /* If either argument is a constant, then we can compute
250 	       a constant value for the nonconstant argument.  */
251 	    if (TREE_CODE (rhs1) == INTEGER_CST
252 		&& TREE_CODE (rhs2) == SSA_NAME)
253 	      derive_equivalences (rhs2,
254 				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
255 						value, rhs1),
256 				   recursion_limit - 1);
257 	    else if (TREE_CODE (rhs2) == INTEGER_CST
258 		     && TREE_CODE (rhs1) == SSA_NAME)
259 	      derive_equivalences (rhs1,
260 				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
261 						value, rhs2),
262 				   recursion_limit - 1);
263 	    break;
264 	  }
265 
266 	/* If one of the operands is a constant, then we can compute
267 	   the value of the other operand.  If both operands are
268 	   SSA_NAMEs, then they must be equal if the result is zero.  */
269 	case MINUS_EXPR:
270 	  {
271 	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
272 	    tree rhs2 = gimple_assign_rhs2 (def_stmt);
273 
274 	    /* If either argument is a constant, then we can compute
275 	       a constant value for the nonconstant argument.  */
276 	    if (TREE_CODE (rhs1) == INTEGER_CST
277 		&& TREE_CODE (rhs2) == SSA_NAME)
278 	      derive_equivalences (rhs2,
279 				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
280 						rhs1, value),
281 				   recursion_limit - 1);
282 	    else if (TREE_CODE (rhs2) == INTEGER_CST
283 		     && TREE_CODE (rhs1) == SSA_NAME)
284 	      derive_equivalences (rhs1,
285 				   fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
286 						value, rhs2),
287 				   recursion_limit - 1);
288 	    else if (integer_zerop (value))
289 	      {
290 		tree cond = build2 (EQ_EXPR, boolean_type_node,
291 				    gimple_assign_rhs1 (def_stmt),
292 				    gimple_assign_rhs2 (def_stmt));
293 		tree inverted = invert_truthvalue (cond);
294 		record_conditions (&this->cond_equivalences, cond, inverted);
295 	      }
296 	    break;
297 	  }
298 
299 
300 	case EQ_EXPR:
301 	case NE_EXPR:
302 	  {
303 	    if ((code == EQ_EXPR && integer_onep (value))
304 		|| (code == NE_EXPR && integer_zerop (value)))
305 	      {
306 		tree rhs1 = gimple_assign_rhs1 (def_stmt);
307 		tree rhs2 = gimple_assign_rhs2 (def_stmt);
308 
309 		/* If either argument is a constant, then record the
310 		   other argument as being the same as that constant.
311 
312 		   If neither operand is a constant, then we have a
313 		   conditional name == name equivalence.  */
314 		if (TREE_CODE (rhs1) == INTEGER_CST)
315 		  derive_equivalences (rhs2, rhs1, recursion_limit - 1);
316 		else if (TREE_CODE (rhs2) == INTEGER_CST)
317 		  derive_equivalences (rhs1, rhs2, recursion_limit - 1);
318 	      }
319 	    else
320 	      {
321 		tree cond = build2 (code, boolean_type_node,
322 				    gimple_assign_rhs1 (def_stmt),
323 				    gimple_assign_rhs2 (def_stmt));
324 		tree inverted = invert_truthvalue (cond);
325 		if (integer_zerop (value))
326 		  std::swap (cond, inverted);
327 		record_conditions (&this->cond_equivalences, cond, inverted);
328 	      }
329 	    break;
330 	  }
331 
332 	/* For BIT_NOT and NEGATE, we can just apply the operation to the
333 	   VALUE to get the new equivalence.  It will always be a constant
334 	   so we can recurse.  */
335 	case BIT_NOT_EXPR:
336 	case NEGATE_EXPR:
337 	  {
338 	    tree rhs = gimple_assign_rhs1 (def_stmt);
339 	    tree res = fold_build1 (code, TREE_TYPE (rhs), value);
340 	    derive_equivalences (rhs, res, recursion_limit - 1);
341 	    break;
342 	  }
343 
344 	default:
345 	  {
346 	    if (TREE_CODE_CLASS (code) == tcc_comparison)
347 	      {
348 		tree cond = build2 (code, boolean_type_node,
349 				    gimple_assign_rhs1 (def_stmt),
350 				    gimple_assign_rhs2 (def_stmt));
351 		tree inverted = invert_truthvalue (cond);
352 		if (integer_zerop (value))
353 		  std::swap (cond, inverted);
354 		record_conditions (&this->cond_equivalences, cond, inverted);
355 		break;
356 	      }
357 	    break;
358 	  }
359 	}
360     }
361 }
362 
363 void
364 edge_info::record_simple_equiv (tree lhs, tree rhs)
365 {
366   /* If the RHS is a constant, then we may be able to derive
367      further equivalences.  Else just record the name = name
368      equivalence.  */
369   if (TREE_CODE (rhs) == INTEGER_CST)
370     derive_equivalences (lhs, rhs, 4);
371   else
372     simple_equivalences.safe_push (equiv_pair (lhs, rhs));
373 }
374 
375 /* Free the edge_info data attached to E, if it exists.  */
376 
377 void
378 free_dom_edge_info (edge e)
379 {
380   class edge_info *edge_info = (struct edge_info *)e->aux;
381 
382   if (edge_info)
383     delete edge_info;
384 }
385 
386 /* Free all EDGE_INFO structures associated with edges in the CFG.
387    If a particular edge can be threaded, copy the redirection
388    target from the EDGE_INFO structure into the edge's AUX field
389    as required by code to update the CFG and SSA graph for
390    jump threading.  */
391 
392 static void
393 free_all_edge_infos (void)
394 {
395   basic_block bb;
396   edge_iterator ei;
397   edge e;
398 
399   FOR_EACH_BB_FN (bb, cfun)
400     {
401       FOR_EACH_EDGE (e, ei, bb->preds)
402         {
403 	  free_dom_edge_info (e);
404 	  e->aux = NULL;
405 	}
406     }
407 }
408 
409 /* We have finished optimizing BB, record any information implied by
410    taking a specific outgoing edge from BB.  */
411 
412 static void
413 record_edge_info (basic_block bb)
414 {
415   gimple_stmt_iterator gsi = gsi_last_bb (bb);
416   class edge_info *edge_info;
417 
418   if (! gsi_end_p (gsi))
419     {
420       gimple *stmt = gsi_stmt (gsi);
421       location_t loc = gimple_location (stmt);
422 
423       if (gimple_code (stmt) == GIMPLE_SWITCH)
424 	{
425 	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
426 	  tree index = gimple_switch_index (switch_stmt);
427 
428 	  if (TREE_CODE (index) == SSA_NAME)
429 	    {
430 	      int i;
431               int n_labels = gimple_switch_num_labels (switch_stmt);
432 	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
433 	      edge e;
434 	      edge_iterator ei;
435 
436 	      for (i = 0; i < n_labels; i++)
437 		{
438 		  tree label = gimple_switch_label (switch_stmt, i);
439 		  basic_block target_bb = label_to_block (CASE_LABEL (label));
440 		  if (CASE_HIGH (label)
441 		      || !CASE_LOW (label)
442 		      || info[target_bb->index])
443 		    info[target_bb->index] = error_mark_node;
444 		  else
445 		    info[target_bb->index] = label;
446 		}
447 
448 	      FOR_EACH_EDGE (e, ei, bb->succs)
449 		{
450 		  basic_block target_bb = e->dest;
451 		  tree label = info[target_bb->index];
452 
453 		  if (label != NULL && label != error_mark_node)
454 		    {
455 		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
456 						 CASE_LOW (label));
457 		      edge_info = new class edge_info (e);
458 		      edge_info->record_simple_equiv (index, x);
459 		    }
460 		}
461 	      free (info);
462 	    }
463 	}
464 
465       /* A COND_EXPR may create equivalences too.  */
466       if (gimple_code (stmt) == GIMPLE_COND)
467 	{
468 	  edge true_edge;
469 	  edge false_edge;
470 
471           tree op0 = gimple_cond_lhs (stmt);
472           tree op1 = gimple_cond_rhs (stmt);
473           enum tree_code code = gimple_cond_code (stmt);
474 
475 	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
476 
477           /* Special case comparing booleans against a constant as we
478              know the value of OP0 on both arms of the branch.  i.e., we
479              can record an equivalence for OP0 rather than COND.
480 
481 	     However, don't do this if the constant isn't zero or one.
482 	     Such conditionals will get optimized more thoroughly during
483 	     the domwalk.  */
484 	  if ((code == EQ_EXPR || code == NE_EXPR)
485 	      && TREE_CODE (op0) == SSA_NAME
486 	      && ssa_name_has_boolean_range (op0)
487 	      && is_gimple_min_invariant (op1)
488 	      && (integer_zerop (op1) || integer_onep (op1)))
489             {
490 	      tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
491 	      tree false_val = constant_boolean_node (false, TREE_TYPE (op0));
492 
493               if (code == EQ_EXPR)
494                 {
495 		  edge_info = new class edge_info (true_edge);
496 		  edge_info->record_simple_equiv (op0,
497 						  (integer_zerop (op1)
498 						   ? false_val : true_val));
499 		  edge_info = new class edge_info (false_edge);
500 		  edge_info->record_simple_equiv (op0,
501 						  (integer_zerop (op1)
502 						   ? true_val : false_val));
503                 }
504               else
505                 {
506 		  edge_info = new class edge_info (true_edge);
507 		  edge_info->record_simple_equiv (op0,
508 						  (integer_zerop (op1)
509 						   ? true_val : false_val));
510 		  edge_info = new class edge_info (false_edge);
511 		  edge_info->record_simple_equiv (op0,
512 						  (integer_zerop (op1)
513 						   ? false_val : true_val));
514                 }
515             }
516 	  /* This can show up in the IL as a result of copy propagation
517 	     it will eventually be canonicalized, but we have to cope
518 	     with this case within the pass.  */
519           else if (is_gimple_min_invariant (op0)
520                    && TREE_CODE (op1) == SSA_NAME)
521             {
522               tree cond = build2 (code, boolean_type_node, op0, op1);
523               tree inverted = invert_truthvalue_loc (loc, cond);
524               bool can_infer_simple_equiv
525                 = !(HONOR_SIGNED_ZEROS (op0)
526                     && real_zerop (op0));
527               struct edge_info *edge_info;
528 
529 	      edge_info = new class edge_info (true_edge);
530               record_conditions (&edge_info->cond_equivalences, cond, inverted);
531 
532               if (can_infer_simple_equiv && code == EQ_EXPR)
533 		edge_info->record_simple_equiv (op1, op0);
534 
535 	      edge_info = new class edge_info (false_edge);
536               record_conditions (&edge_info->cond_equivalences, inverted, cond);
537 
538               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
539 		edge_info->record_simple_equiv (op1, op0);
540             }
541 
542           else if (TREE_CODE (op0) == SSA_NAME
543                    && (TREE_CODE (op1) == SSA_NAME
544                        || is_gimple_min_invariant (op1)))
545             {
546               tree cond = build2 (code, boolean_type_node, op0, op1);
547               tree inverted = invert_truthvalue_loc (loc, cond);
548               bool can_infer_simple_equiv
549                 = !(HONOR_SIGNED_ZEROS (op1)
550                     && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
551               struct edge_info *edge_info;
552 
553 	      edge_info = new class edge_info (true_edge);
554               record_conditions (&edge_info->cond_equivalences, cond, inverted);
555 
556               if (can_infer_simple_equiv && code == EQ_EXPR)
557 		edge_info->record_simple_equiv (op0, op1);
558 
559 	      edge_info = new class edge_info (false_edge);
560               record_conditions (&edge_info->cond_equivalences, inverted, cond);
561 
562               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
563 		edge_info->record_simple_equiv (op0, op1);
564             }
565         }
566     }
567 }
568 
569 
570 class dom_opt_dom_walker : public dom_walker
571 {
572 public:
573   dom_opt_dom_walker (cdi_direction direction,
574 		      class const_and_copies *const_and_copies,
575 		      class avail_exprs_stack *avail_exprs_stack,
576 		      gcond *dummy_cond)
577     : dom_walker (direction, REACHABLE_BLOCKS),
578       m_const_and_copies (const_and_copies),
579       m_avail_exprs_stack (avail_exprs_stack),
580       m_dummy_cond (dummy_cond) { }
581 
582   virtual edge before_dom_children (basic_block);
583   virtual void after_dom_children (basic_block);
584 
585 private:
586 
587   /* Unwindable equivalences, both const/copy and expression varieties.  */
588   class const_and_copies *m_const_and_copies;
589   class avail_exprs_stack *m_avail_exprs_stack;
590 
591   /* VRP data.  */
592   class evrp_range_analyzer evrp_range_analyzer;
593 
594   /* Dummy condition to avoid creating lots of throw away statements.  */
595   gcond *m_dummy_cond;
596 
597   /* Optimize a single statement within a basic block using the
598      various tables mantained by DOM.  Returns the taken edge if
599      the statement is a conditional with a statically determined
600      value.  */
601   edge optimize_stmt (basic_block, gimple_stmt_iterator);
602 };
603 
604 /* Jump threading, redundancy elimination and const/copy propagation.
605 
606    This pass may expose new symbols that need to be renamed into SSA.  For
607    every new symbol exposed, its corresponding bit will be set in
608    VARS_TO_RENAME.  */
609 
610 namespace {
611 
612 const pass_data pass_data_dominator =
613 {
614   GIMPLE_PASS, /* type */
615   "dom", /* name */
616   OPTGROUP_NONE, /* optinfo_flags */
617   TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
618   ( PROP_cfg | PROP_ssa ), /* properties_required */
619   0, /* properties_provided */
620   0, /* properties_destroyed */
621   0, /* todo_flags_start */
622   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
623 };
624 
625 class pass_dominator : public gimple_opt_pass
626 {
627 public:
628   pass_dominator (gcc::context *ctxt)
629     : gimple_opt_pass (pass_data_dominator, ctxt),
630       may_peel_loop_headers_p (false)
631   {}
632 
633   /* opt_pass methods: */
634   opt_pass * clone () { return new pass_dominator (m_ctxt); }
635   void set_pass_param (unsigned int n, bool param)
636     {
637       gcc_assert (n == 0);
638       may_peel_loop_headers_p = param;
639     }
640   virtual bool gate (function *) { return flag_tree_dom != 0; }
641   virtual unsigned int execute (function *);
642 
643  private:
644   /* This flag is used to prevent loops from being peeled repeatedly in jump
645      threading; it will be removed once we preserve loop structures throughout
646      the compilation -- we will be able to mark the affected loops directly in
647      jump threading, and avoid peeling them next time.  */
648   bool may_peel_loop_headers_p;
649 }; // class pass_dominator
650 
651 unsigned int
652 pass_dominator::execute (function *fun)
653 {
654   memset (&opt_stats, 0, sizeof (opt_stats));
655 
656   /* Create our hash tables.  */
657   hash_table<expr_elt_hasher> *avail_exprs
658     = new hash_table<expr_elt_hasher> (1024);
659   class avail_exprs_stack *avail_exprs_stack
660     = new class avail_exprs_stack (avail_exprs);
661   class const_and_copies *const_and_copies = new class const_and_copies ();
662   need_eh_cleanup = BITMAP_ALLOC (NULL);
663   need_noreturn_fixup.create (0);
664 
665   calculate_dominance_info (CDI_DOMINATORS);
666   cfg_altered = false;
667 
668   /* We need to know loop structures in order to avoid destroying them
669      in jump threading.  Note that we still can e.g. thread through loop
670      headers to an exit edge, or through loop header to the loop body, assuming
671      that we update the loop info.
672 
673      TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
674      to several overly conservative bail-outs in jump threading, case
675      gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
676      missing.  We should improve jump threading in future then
677      LOOPS_HAVE_PREHEADERS won't be needed here.  */
678   loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
679 
680   /* Initialize the value-handle array.  */
681   threadedge_initialize_values ();
682 
683   /* We need accurate information regarding back edges in the CFG
684      for jump threading; this may include back edges that are not part of
685      a single loop.  */
686   mark_dfs_back_edges ();
687 
688   /* We want to create the edge info structures before the dominator walk
689      so that they'll be in place for the jump threader, particularly when
690      threading through a join block.
691 
692      The conditions will be lazily updated with global equivalences as
693      we reach them during the dominator walk.  */
694   basic_block bb;
695   FOR_EACH_BB_FN (bb, fun)
696     record_edge_info (bb);
697 
698   gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
699 					 integer_zero_node, NULL, NULL);
700 
701   /* Recursively walk the dominator tree optimizing statements.  */
702   dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
703 			     avail_exprs_stack, dummy_cond);
704   walker.walk (fun->cfg->x_entry_block_ptr);
705 
706   /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
707      edge.  When found, remove jump threads which contain any outgoing
708      edge from the affected block.  */
709   if (cfg_altered)
710     {
711       FOR_EACH_BB_FN (bb, fun)
712 	{
713 	  edge_iterator ei;
714 	  edge e;
715 
716 	  /* First see if there are any edges without EDGE_EXECUTABLE
717 	     set.  */
718 	  bool found = false;
719 	  FOR_EACH_EDGE (e, ei, bb->succs)
720 	    {
721 	      if ((e->flags & EDGE_EXECUTABLE) == 0)
722 		{
723 		  found = true;
724 		  break;
725 		}
726 	    }
727 
728 	  /* If there were any such edges found, then remove jump threads
729 	     containing any edge leaving BB.  */
730 	  if (found)
731 	    FOR_EACH_EDGE (e, ei, bb->succs)
732 	      remove_jump_threads_including (e);
733 	}
734     }
735 
736   {
737     gimple_stmt_iterator gsi;
738     basic_block bb;
739     FOR_EACH_BB_FN (bb, fun)
740       {
741 	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
742 	  update_stmt_if_modified (gsi_stmt (gsi));
743       }
744   }
745 
746   /* If we exposed any new variables, go ahead and put them into
747      SSA form now, before we handle jump threading.  This simplifies
748      interactions between rewriting of _DECL nodes into SSA form
749      and rewriting SSA_NAME nodes into SSA form after block
750      duplication and CFG manipulation.  */
751   update_ssa (TODO_update_ssa);
752 
753   free_all_edge_infos ();
754 
755   /* Thread jumps, creating duplicate blocks as needed.  */
756   cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);
757 
758   if (cfg_altered)
759     free_dominance_info (CDI_DOMINATORS);
760 
761   /* Removal of statements may make some EH edges dead.  Purge
762      such edges from the CFG as needed.  */
763   if (!bitmap_empty_p (need_eh_cleanup))
764     {
765       unsigned i;
766       bitmap_iterator bi;
767 
768       /* Jump threading may have created forwarder blocks from blocks
769 	 needing EH cleanup; the new successor of these blocks, which
770 	 has inherited from the original block, needs the cleanup.
771 	 Don't clear bits in the bitmap, as that can break the bitmap
772 	 iterator.  */
773       EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
774 	{
775 	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
776 	  if (bb == NULL)
777 	    continue;
778 	  while (single_succ_p (bb)
779 		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
780 	    bb = single_succ (bb);
781 	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
782 	    continue;
783 	  if ((unsigned) bb->index != i)
784 	    bitmap_set_bit (need_eh_cleanup, bb->index);
785 	}
786 
787       gimple_purge_all_dead_eh_edges (need_eh_cleanup);
788       bitmap_clear (need_eh_cleanup);
789     }
790 
791   /* Fixup stmts that became noreturn calls.  This may require splitting
792      blocks and thus isn't possible during the dominator walk or before
793      jump threading finished.  Do this in reverse order so we don't
794      inadvertedly remove a stmt we want to fixup by visiting a dominating
795      now noreturn call first.  */
796   while (!need_noreturn_fixup.is_empty ())
797     {
798       gimple *stmt = need_noreturn_fixup.pop ();
799       if (dump_file && dump_flags & TDF_DETAILS)
800 	{
801 	  fprintf (dump_file, "Fixing up noreturn call ");
802 	  print_gimple_stmt (dump_file, stmt, 0);
803 	  fprintf (dump_file, "\n");
804 	}
805       fixup_noreturn_call (stmt);
806     }
807 
808   statistics_counter_event (fun, "Redundant expressions eliminated",
809 			    opt_stats.num_re);
810   statistics_counter_event (fun, "Constants propagated",
811 			    opt_stats.num_const_prop);
812   statistics_counter_event (fun, "Copies propagated",
813 			    opt_stats.num_copy_prop);
814 
815   /* Debugging dumps.  */
816   if (dump_file && (dump_flags & TDF_STATS))
817     dump_dominator_optimization_stats (dump_file, avail_exprs);
818 
819   loop_optimizer_finalize ();
820 
821   /* Delete our main hashtable.  */
822   delete avail_exprs;
823   avail_exprs = NULL;
824 
825   /* Free asserted bitmaps and stacks.  */
826   BITMAP_FREE (need_eh_cleanup);
827   need_noreturn_fixup.release ();
828   delete avail_exprs_stack;
829   delete const_and_copies;
830 
831   /* Free the value-handle array.  */
832   threadedge_finalize_values ();
833 
834   return 0;
835 }
836 
837 } // anon namespace
838 
839 gimple_opt_pass *
840 make_pass_dominator (gcc::context *ctxt)
841 {
842   return new pass_dominator (ctxt);
843 }
844 
845 /* A hack until we remove threading from tree-vrp.c and bring the
846    simplification routine into the dom_opt_dom_walker class.  */
847 static class vr_values *x_vr_values;
848 
849 /* A trivial wrapper so that we can present the generic jump
850    threading code with a simple API for simplifying statements.  */
851 static tree
852 simplify_stmt_for_jump_threading (gimple *stmt,
853 				  gimple *within_stmt ATTRIBUTE_UNUSED,
854 				  class avail_exprs_stack *avail_exprs_stack,
855 				  basic_block bb ATTRIBUTE_UNUSED)
856 {
857   /* First query our hash table to see if the the expression is available
858      there.  A non-NULL return value will be either a constant or another
859      SSA_NAME.  */
860   tree cached_lhs =  avail_exprs_stack->lookup_avail_expr (stmt, false, true);
861   if (cached_lhs)
862     return cached_lhs;
863 
864   /* If the hash table query failed, query VRP information.  This is
865      essentially the same as tree-vrp's simplification routine.  The
866      copy in tree-vrp is scheduled for removal in gcc-9.  */
867   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
868     {
869       cached_lhs
870 	= x_vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
871 						 gimple_cond_lhs (cond_stmt),
872 						 gimple_cond_rhs (cond_stmt),
873 						 within_stmt);
874       return cached_lhs;
875     }
876 
877   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
878     {
879       tree op = gimple_switch_index (switch_stmt);
880       if (TREE_CODE (op) != SSA_NAME)
881 	return NULL_TREE;
882 
883       value_range *vr = x_vr_values->get_value_range (op);
884       if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
885 	  || symbolic_range_p (vr))
886 	return NULL_TREE;
887 
888       if (vr->type == VR_RANGE)
889 	{
890 	  size_t i, j;
891 
892 	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
893 
894 	  if (i == j)
895 	    {
896 	      tree label = gimple_switch_label (switch_stmt, i);
897 
898 	      if (CASE_HIGH (label) != NULL_TREE
899 		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
900 		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
901 		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
902 		     && tree_int_cst_equal (vr->min, vr->max)))
903 		return label;
904 
905 	      if (i > j)
906 		return gimple_switch_label (switch_stmt, 0);
907 	    }
908 	}
909 
910       if (vr->type == VR_ANTI_RANGE)
911           {
912             unsigned n = gimple_switch_num_labels (switch_stmt);
913             tree min_label = gimple_switch_label (switch_stmt, 1);
914             tree max_label = gimple_switch_label (switch_stmt, n - 1);
915 
916             /* The default label will be taken only if the anti-range of the
917                operand is entirely outside the bounds of all the (non-default)
918                case labels.  */
919             if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
920                 && (CASE_HIGH (max_label) != NULL_TREE
921                     ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
922                     : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
923             return gimple_switch_label (switch_stmt, 0);
924           }
925 	return NULL_TREE;
926     }
927 
928   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
929     {
930       tree lhs = gimple_assign_lhs (assign_stmt);
931       if (TREE_CODE (lhs) == SSA_NAME
932 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
933 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
934 	  && stmt_interesting_for_vrp (stmt))
935 	{
936 	  edge dummy_e;
937 	  tree dummy_tree;
938 	  value_range new_vr = VR_INITIALIZER;
939 	  x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
940 					      &dummy_tree, &new_vr);
941 	  if (range_int_cst_singleton_p (&new_vr))
942 	    return new_vr.min;
943 	}
944     }
945   return NULL;
946 }
947 
948 /* Valueize hook for gimple_fold_stmt_to_constant_1.  */
949 
950 static tree
951 dom_valueize (tree t)
952 {
953   if (TREE_CODE (t) == SSA_NAME)
954     {
955       tree tem = SSA_NAME_VALUE (t);
956       if (tem)
957 	return tem;
958     }
959   return t;
960 }
961 
962 /* We have just found an equivalence for LHS on an edge E.
963    Look backwards to other uses of LHS and see if we can derive
964    additional equivalences that are valid on edge E.  */
965 static void
966 back_propagate_equivalences (tree lhs, edge e,
967 			     class const_and_copies *const_and_copies)
968 {
969   use_operand_p use_p;
970   imm_use_iterator iter;
971   bitmap domby = NULL;
972   basic_block dest = e->dest;
973 
974   /* Iterate over the uses of LHS to see if any dominate E->dest.
975      If so, they may create useful equivalences too.
976 
977      ???  If the code gets re-organized to a worklist to catch more
978      indirect opportunities and it is made to handle PHIs then this
979      should only consider use_stmts in basic-blocks we have already visited.  */
980   FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
981     {
982       gimple *use_stmt = USE_STMT (use_p);
983 
984       /* Often the use is in DEST, which we trivially know we can't use.
985 	 This is cheaper than the dominator set tests below.  */
986       if (dest == gimple_bb (use_stmt))
987 	continue;
988 
989       /* Filter out statements that can never produce a useful
990 	 equivalence.  */
991       tree lhs2 = gimple_get_lhs (use_stmt);
992       if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
993 	continue;
994 
995       /* Profiling has shown the domination tests here can be fairly
996 	 expensive.  We get significant improvements by building the
997 	 set of blocks that dominate BB.  We can then just test
998 	 for set membership below.
999 
1000 	 We also initialize the set lazily since often the only uses
1001 	 are going to be in the same block as DEST.  */
1002       if (!domby)
1003 	{
1004 	  domby = BITMAP_ALLOC (NULL);
1005 	  basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
1006 	  while (bb)
1007 	    {
1008 	      bitmap_set_bit (domby, bb->index);
1009 	      bb = get_immediate_dominator (CDI_DOMINATORS, bb);
1010 	    }
1011 	}
1012 
1013       /* This tests if USE_STMT does not dominate DEST.  */
1014       if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
1015 	continue;
1016 
1017       /* At this point USE_STMT dominates DEST and may result in a
1018 	 useful equivalence.  Try to simplify its RHS to a constant
1019 	 or SSA_NAME.  */
1020       tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
1021 						 no_follow_ssa_edges);
1022       if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
1023 	record_equality (lhs2, res, const_and_copies);
1024     }
1025 
1026   if (domby)
1027     BITMAP_FREE (domby);
1028 }
1029 
1030 /* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
1031    by traversing edge E (which are cached in E->aux).
1032 
1033    Callers are responsible for managing the unwinding markers.  */
1034 void
1035 record_temporary_equivalences (edge e,
1036 			       class const_and_copies *const_and_copies,
1037 			       class avail_exprs_stack *avail_exprs_stack)
1038 {
1039   int i;
1040   class edge_info *edge_info = (class edge_info *) e->aux;
1041 
1042   /* If we have info associated with this edge, record it into
1043      our equivalence tables.  */
1044   if (edge_info)
1045     {
1046       cond_equivalence *eq;
1047       /* If we have 0 = COND or 1 = COND equivalences, record them
1048 	 into our expression hash tables.  */
1049       for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1050 	avail_exprs_stack->record_cond (eq);
1051 
1052       edge_info::equiv_pair *seq;
1053       for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1054 	{
1055 	  tree lhs = seq->first;
1056 	  if (!lhs || TREE_CODE (lhs) != SSA_NAME)
1057 	    continue;
1058 
1059 	  /* Record the simple NAME = VALUE equivalence.  */
1060 	  tree rhs = seq->second;
1061 
1062 	  /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
1063 	     cheaper to compute than the other, then set up the equivalence
1064 	     such that we replace the expensive one with the cheap one.
1065 
1066 	     If they are the same cost to compute, then do not record
1067 	     anything.  */
1068 	  if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
1069 	    {
1070 	      gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
1071 	      int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);
1072 
1073 	      gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
1074 	      int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);
1075 
1076 	      if (rhs_cost > lhs_cost)
1077 	        record_equality (rhs, lhs, const_and_copies);
1078 	      else if (rhs_cost < lhs_cost)
1079 	        record_equality (lhs, rhs, const_and_copies);
1080 	    }
1081 	  else
1082 	    record_equality (lhs, rhs, const_and_copies);
1083 
1084 
1085 	  /* Any equivalence found for LHS may result in additional
1086 	     equivalences for other uses of LHS that we have already
1087 	     processed.  */
1088 	  back_propagate_equivalences (lhs, e, const_and_copies);
1089 	}
1090     }
1091 }
1092 
1093 /* PHI nodes can create equivalences too.
1094 
1095    Ignoring any alternatives which are the same as the result, if
1096    all the alternatives are equal, then the PHI node creates an
1097    equivalence.  */
1098 
1099 static void
1100 record_equivalences_from_phis (basic_block bb)
1101 {
1102   gphi_iterator gsi;
1103 
1104   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1105     {
1106       gphi *phi = gsi.phi ();
1107 
1108       tree lhs = gimple_phi_result (phi);
1109       tree rhs = NULL;
1110       size_t i;
1111 
1112       for (i = 0; i < gimple_phi_num_args (phi); i++)
1113 	{
1114 	  tree t = gimple_phi_arg_def (phi, i);
1115 
1116 	  /* Ignore alternatives which are the same as our LHS.  Since
1117 	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
1118 	     can simply compare pointers.  */
1119 	  if (lhs == t)
1120 	    continue;
1121 
1122 	  /* If the associated edge is not marked as executable, then it
1123 	     can be ignored.  */
1124 	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
1125 	    continue;
1126 
1127 	  t = dom_valueize (t);
1128 
1129 	  /* If T is an SSA_NAME and its associated edge is a backedge,
1130 	     then quit as we can not utilize this equivalence.  */
1131 	  if (TREE_CODE (t) == SSA_NAME
1132 	      && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
1133 	    break;
1134 
1135 	  /* If we have not processed an alternative yet, then set
1136 	     RHS to this alternative.  */
1137 	  if (rhs == NULL)
1138 	    rhs = t;
1139 	  /* If we have processed an alternative (stored in RHS), then
1140 	     see if it is equal to this one.  If it isn't, then stop
1141 	     the search.  */
1142 	  else if (! operand_equal_for_phi_arg_p (rhs, t))
1143 	    break;
1144 	}
1145 
1146       /* If we had no interesting alternatives, then all the RHS alternatives
1147 	 must have been the same as LHS.  */
1148       if (!rhs)
1149 	rhs = lhs;
1150 
1151       /* If we managed to iterate through each PHI alternative without
1152 	 breaking out of the loop, then we have a PHI which may create
1153 	 a useful equivalence.  We do not need to record unwind data for
1154 	 this, since this is a true assignment and not an equivalence
1155 	 inferred from a comparison.  All uses of this ssa name are dominated
1156 	 by this assignment, so unwinding just costs time and space.  */
1157       if (i == gimple_phi_num_args (phi)
1158 	  && may_propagate_copy (lhs, rhs))
1159 	set_ssa_name_value (lhs, rhs);
1160     }
1161 }
1162 
1163 /* Record any equivalences created by the incoming edge to BB into
1164    CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
1165    incoming edge, then no equivalence is created.  */
1166 
1167 static void
1168 record_equivalences_from_incoming_edge (basic_block bb,
1169     class const_and_copies *const_and_copies,
1170     class avail_exprs_stack *avail_exprs_stack)
1171 {
1172   edge e;
1173   basic_block parent;
1174 
1175   /* If our parent block ended with a control statement, then we may be
1176      able to record some equivalences based on which outgoing edge from
1177      the parent was followed.  */
1178   parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1179 
1180   e = single_pred_edge_ignoring_loop_edges (bb, true);
1181 
1182   /* If we had a single incoming edge from our parent block, then enter
1183      any data associated with the edge into our tables.  */
1184   if (e && e->src == parent)
1185     record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
1186 }
1187 
1188 /* Dump statistics for the hash table HTAB.  */
1189 
1190 static void
1191 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1192 {
1193   fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1194 	   (long) htab.size (),
1195 	   (long) htab.elements (),
1196 	   htab.collisions ());
1197 }
1198 
1199 /* Dump SSA statistics on FILE.  */
1200 
1201 static void
1202 dump_dominator_optimization_stats (FILE *file,
1203 				   hash_table<expr_elt_hasher> *avail_exprs)
1204 {
1205   fprintf (file, "Total number of statements:                   %6ld\n\n",
1206 	   opt_stats.num_stmts);
1207   fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1208            opt_stats.num_exprs_considered);
1209 
1210   fprintf (file, "\nHash table statistics:\n");
1211 
1212   fprintf (file, "    avail_exprs: ");
1213   htab_statistics (file, *avail_exprs);
1214 }
1215 
1216 
1217 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1218    This constrains the cases in which we may treat this as assignment.  */
1219 
1220 static void
1221 record_equality (tree x, tree y, class const_and_copies *const_and_copies)
1222 {
1223   tree prev_x = NULL, prev_y = NULL;
1224 
1225   if (tree_swap_operands_p (x, y))
1226     std::swap (x, y);
1227 
1228   /* Most of the time tree_swap_operands_p does what we want.  But there
1229      are cases where we know one operand is better for copy propagation than
1230      the other.  Given no other code cares about ordering of equality
1231      comparison operators for that purpose, we just handle the special cases
1232      here.  */
1233   if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1234     {
1235       /* If one operand is a single use operand, then make it
1236 	 X.  This will preserve its single use properly and if this
1237 	 conditional is eliminated, the computation of X can be
1238 	 eliminated as well.  */
1239       if (has_single_use (y) && ! has_single_use (x))
1240 	std::swap (x, y);
1241     }
1242   if (TREE_CODE (x) == SSA_NAME)
1243     prev_x = SSA_NAME_VALUE (x);
1244   if (TREE_CODE (y) == SSA_NAME)
1245     prev_y = SSA_NAME_VALUE (y);
1246 
1247   /* If one of the previous values is invariant, or invariant in more loops
1248      (by depth), then use that.
1249      Otherwise it doesn't matter which value we choose, just so
1250      long as we canonicalize on one value.  */
1251   if (is_gimple_min_invariant (y))
1252     ;
1253   else if (is_gimple_min_invariant (x))
1254     prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1255   else if (prev_x && is_gimple_min_invariant (prev_x))
1256     x = y, y = prev_x, prev_x = prev_y;
1257   else if (prev_y)
1258     y = prev_y;
1259 
1260   /* After the swapping, we must have one SSA_NAME.  */
1261   if (TREE_CODE (x) != SSA_NAME)
1262     return;
1263 
1264   /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1265      variable compared against zero.  If we're honoring signed zeros,
1266      then we cannot record this value unless we know that the value is
1267      nonzero.  */
1268   if (HONOR_SIGNED_ZEROS (x)
1269       && (TREE_CODE (y) != REAL_CST
1270 	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
1271     return;
1272 
1273   const_and_copies->record_const_or_copy (x, y, prev_x);
1274 }
1275 
1276 /* Returns true when STMT is a simple iv increment.  It detects the
1277    following situation:
1278 
1279    i_1 = phi (..., i_k)
1280    [...]
1281    i_j = i_{j-1}  for each j : 2 <= j <= k-1
1282    [...]
1283    i_k = i_{k-1} +/- ...  */
1284 
1285 bool
1286 simple_iv_increment_p (gimple *stmt)
1287 {
1288   enum tree_code code;
1289   tree lhs, preinc;
1290   gimple *phi;
1291   size_t i;
1292 
1293   if (gimple_code (stmt) != GIMPLE_ASSIGN)
1294     return false;
1295 
1296   lhs = gimple_assign_lhs (stmt);
1297   if (TREE_CODE (lhs) != SSA_NAME)
1298     return false;
1299 
1300   code = gimple_assign_rhs_code (stmt);
1301   if (code != PLUS_EXPR
1302       && code != MINUS_EXPR
1303       && code != POINTER_PLUS_EXPR)
1304     return false;
1305 
1306   preinc = gimple_assign_rhs1 (stmt);
1307   if (TREE_CODE (preinc) != SSA_NAME)
1308     return false;
1309 
1310   phi = SSA_NAME_DEF_STMT (preinc);
1311   while (gimple_code (phi) != GIMPLE_PHI)
1312     {
1313       /* Follow trivial copies, but not the DEF used in a back edge,
1314 	 so that we don't prevent coalescing.  */
1315       if (!gimple_assign_ssa_name_copy_p (phi))
1316 	return false;
1317       preinc = gimple_assign_rhs1 (phi);
1318       phi = SSA_NAME_DEF_STMT (preinc);
1319     }
1320 
1321   for (i = 0; i < gimple_phi_num_args (phi); i++)
1322     if (gimple_phi_arg_def (phi, i) == lhs)
1323       return true;
1324 
1325   return false;
1326 }
1327 
1328 /* Propagate know values from SSA_NAME_VALUE into the PHI nodes of the
1329    successors of BB.  */
1330 
1331 static void
1332 cprop_into_successor_phis (basic_block bb,
1333 			   class const_and_copies *const_and_copies)
1334 {
1335   edge e;
1336   edge_iterator ei;
1337 
1338   FOR_EACH_EDGE (e, ei, bb->succs)
1339     {
1340       int indx;
1341       gphi_iterator gsi;
1342 
1343       /* If this is an abnormal edge, then we do not want to copy propagate
1344 	 into the PHI alternative associated with this edge.  */
1345       if (e->flags & EDGE_ABNORMAL)
1346 	continue;
1347 
1348       gsi = gsi_start_phis (e->dest);
1349       if (gsi_end_p (gsi))
1350 	continue;
1351 
1352       /* We may have an equivalence associated with this edge.  While
1353 	 we can not propagate it into non-dominated blocks, we can
1354 	 propagate them into PHIs in non-dominated blocks.  */
1355 
1356       /* Push the unwind marker so we can reset the const and copies
1357 	 table back to its original state after processing this edge.  */
1358       const_and_copies->push_marker ();
1359 
1360       /* Extract and record any simple NAME = VALUE equivalences.
1361 
1362 	 Don't bother with [01] = COND equivalences, they're not useful
1363 	 here.  */
1364       class edge_info *edge_info = (class edge_info *) e->aux;
1365 
1366       if (edge_info)
1367 	{
1368 	  edge_info::equiv_pair *seq;
1369 	  for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1370 	    {
1371 	      tree lhs = seq->first;
1372 	      tree rhs = seq->second;
1373 
1374 	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
1375 		const_and_copies->record_const_or_copy (lhs, rhs);
1376 	    }
1377 
1378 	}
1379 
1380       indx = e->dest_idx;
1381       for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1382 	{
1383 	  tree new_val;
1384 	  use_operand_p orig_p;
1385 	  tree orig_val;
1386           gphi *phi = gsi.phi ();
1387 
1388 	  /* The alternative may be associated with a constant, so verify
1389 	     it is an SSA_NAME before doing anything with it.  */
1390 	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1391 	  orig_val = get_use_from_ptr (orig_p);
1392 	  if (TREE_CODE (orig_val) != SSA_NAME)
1393 	    continue;
1394 
1395 	  /* If we have *ORIG_P in our constant/copy table, then replace
1396 	     ORIG_P with its value in our constant/copy table.  */
1397 	  new_val = SSA_NAME_VALUE (orig_val);
1398 	  if (new_val
1399 	      && new_val != orig_val
1400 	      && may_propagate_copy (orig_val, new_val))
1401 	    propagate_value (orig_p, new_val);
1402 	}
1403 
1404       const_and_copies->pop_to_marker ();
1405     }
1406 }
1407 
1408 edge
1409 dom_opt_dom_walker::before_dom_children (basic_block bb)
1410 {
1411   gimple_stmt_iterator gsi;
1412 
1413   if (dump_file && (dump_flags & TDF_DETAILS))
1414     fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1415 
1416   evrp_range_analyzer.enter (bb);
1417 
1418   /* Push a marker on the stacks of local information so that we know how
1419      far to unwind when we finalize this block.  */
1420   m_avail_exprs_stack->push_marker ();
1421   m_const_and_copies->push_marker ();
1422 
1423   record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1424 					  m_avail_exprs_stack);
1425 
1426   /* PHI nodes can create equivalences too.  */
1427   record_equivalences_from_phis (bb);
1428 
1429   /* Create equivalences from redundant PHIs.  PHIs are only truly
1430      redundant when they exist in the same block, so push another
1431      marker and unwind right afterwards.  */
1432   m_avail_exprs_stack->push_marker ();
1433   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1434     eliminate_redundant_computations (&gsi, m_const_and_copies,
1435 				      m_avail_exprs_stack);
1436   m_avail_exprs_stack->pop_to_marker ();
1437 
1438   edge taken_edge = NULL;
1439   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1440     {
1441       evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
1442       taken_edge = this->optimize_stmt (bb, gsi);
1443     }
1444 
1445   /* Now prepare to process dominated blocks.  */
1446   record_edge_info (bb);
1447   cprop_into_successor_phis (bb, m_const_and_copies);
1448   if (taken_edge && !dbg_cnt (dom_unreachable_edges))
1449     return NULL;
1450 
1451   return taken_edge;
1452 }
1453 
1454 /* We have finished processing the dominator children of BB, perform
1455    any finalization actions in preparation for leaving this node in
1456    the dominator tree.  */
1457 
1458 void
1459 dom_opt_dom_walker::after_dom_children (basic_block bb)
1460 {
1461   x_vr_values = evrp_range_analyzer.get_vr_values ();
1462   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
1463 			 m_avail_exprs_stack,
1464 			 &evrp_range_analyzer,
1465 			 simplify_stmt_for_jump_threading);
1466   x_vr_values = NULL;
1467 
1468   /* These remove expressions local to BB from the tables.  */
1469   m_avail_exprs_stack->pop_to_marker ();
1470   m_const_and_copies->pop_to_marker ();
1471   evrp_range_analyzer.leave (bb);
1472 }
1473 
1474 /* Search for redundant computations in STMT.  If any are found, then
1475    replace them with the variable holding the result of the computation.
1476 
1477    If safe, record this expression into AVAIL_EXPRS_STACK and
1478    CONST_AND_COPIES.  */
1479 
1480 static void
1481 eliminate_redundant_computations (gimple_stmt_iterator* gsi,
1482 				  class const_and_copies *const_and_copies,
1483 				  class avail_exprs_stack *avail_exprs_stack)
1484 {
1485   tree expr_type;
1486   tree cached_lhs;
1487   tree def;
1488   bool insert = true;
1489   bool assigns_var_p = false;
1490 
1491   gimple *stmt = gsi_stmt (*gsi);
1492 
1493   if (gimple_code (stmt) == GIMPLE_PHI)
1494     def = gimple_phi_result (stmt);
1495   else
1496     def = gimple_get_lhs (stmt);
1497 
1498   /* Certain expressions on the RHS can be optimized away, but can not
1499      themselves be entered into the hash tables.  */
1500   if (! def
1501       || TREE_CODE (def) != SSA_NAME
1502       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1503       || gimple_vdef (stmt)
1504       /* Do not record equivalences for increments of ivs.  This would create
1505 	 overlapping live ranges for a very questionable gain.  */
1506       || simple_iv_increment_p (stmt))
1507     insert = false;
1508 
1509   /* Check if the expression has been computed before.  */
1510   cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);
1511 
1512   opt_stats.num_exprs_considered++;
1513 
1514   /* Get the type of the expression we are trying to optimize.  */
1515   if (is_gimple_assign (stmt))
1516     {
1517       expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1518       assigns_var_p = true;
1519     }
1520   else if (gimple_code (stmt) == GIMPLE_COND)
1521     expr_type = boolean_type_node;
1522   else if (is_gimple_call (stmt))
1523     {
1524       gcc_assert (gimple_call_lhs (stmt));
1525       expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1526       assigns_var_p = true;
1527     }
1528   else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1529     expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1530   else if (gimple_code (stmt) == GIMPLE_PHI)
1531     /* We can't propagate into a phi, so the logic below doesn't apply.
1532        Instead record an equivalence between the cached LHS and the
1533        PHI result of this statement, provided they are in the same block.
1534        This should be sufficient to kill the redundant phi.  */
1535     {
1536       if (def && cached_lhs)
1537 	const_and_copies->record_const_or_copy (def, cached_lhs);
1538       return;
1539     }
1540   else
1541     gcc_unreachable ();
1542 
1543   if (!cached_lhs)
1544     return;
1545 
1546   /* It is safe to ignore types here since we have already done
1547      type checking in the hashing and equality routines.  In fact
1548      type checking here merely gets in the way of constant
1549      propagation.  Also, make sure that it is safe to propagate
1550      CACHED_LHS into the expression in STMT.  */
1551   if ((TREE_CODE (cached_lhs) != SSA_NAME
1552        && (assigns_var_p
1553            || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1554       || may_propagate_copy_into_stmt (stmt, cached_lhs))
1555   {
1556       gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1557 			   || is_gimple_min_invariant (cached_lhs));
1558 
1559       if (dump_file && (dump_flags & TDF_DETAILS))
1560 	{
1561 	  fprintf (dump_file, "  Replaced redundant expr '");
1562 	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
1563 	  fprintf (dump_file, "' with '");
1564 	  print_generic_expr (dump_file, cached_lhs, dump_flags);
1565           fprintf (dump_file, "'\n");
1566 	}
1567 
1568       opt_stats.num_re++;
1569 
1570       if (assigns_var_p
1571 	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1572 	cached_lhs = fold_convert (expr_type, cached_lhs);
1573 
1574       propagate_tree_value_into_stmt (gsi, cached_lhs);
1575 
1576       /* Since it is always necessary to mark the result as modified,
1577          perhaps we should move this into propagate_tree_value_into_stmt
1578          itself.  */
1579       gimple_set_modified (gsi_stmt (*gsi), true);
1580   }
1581 }
1582 
1583 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1584    the available expressions table or the const_and_copies table.
1585    Detect and record those equivalences into AVAIL_EXPRS_STACK.
1586 
1587    We handle only very simple copy equivalences here.  The heavy
1588    lifing is done by eliminate_redundant_computations.  */
1589 
1590 static void
1591 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1592 			       class avail_exprs_stack *avail_exprs_stack)
1593 {
1594   tree lhs;
1595   enum tree_code lhs_code;
1596 
1597   gcc_assert (is_gimple_assign (stmt));
1598 
1599   lhs = gimple_assign_lhs (stmt);
1600   lhs_code = TREE_CODE (lhs);
1601 
1602   if (lhs_code == SSA_NAME
1603       && gimple_assign_single_p (stmt))
1604     {
1605       tree rhs = gimple_assign_rhs1 (stmt);
1606 
1607       /* If the RHS of the assignment is a constant or another variable that
1608 	 may be propagated, register it in the CONST_AND_COPIES table.  We
1609 	 do not need to record unwind data for this, since this is a true
1610 	 assignment and not an equivalence inferred from a comparison.  All
1611 	 uses of this ssa name are dominated by this assignment, so unwinding
1612 	 just costs time and space.  */
1613       if (may_optimize_p
1614 	  && (TREE_CODE (rhs) == SSA_NAME
1615 	      || is_gimple_min_invariant (rhs)))
1616 	{
1617 	  rhs = dom_valueize (rhs);
1618 
1619 	  if (dump_file && (dump_flags & TDF_DETAILS))
1620 	    {
1621 	      fprintf (dump_file, "==== ASGN ");
1622 	      print_generic_expr (dump_file, lhs);
1623 	      fprintf (dump_file, " = ");
1624 	      print_generic_expr (dump_file, rhs);
1625 	      fprintf (dump_file, "\n");
1626 	    }
1627 
1628 	  set_ssa_name_value (lhs, rhs);
1629 	}
1630     }
1631 
1632   /* Make sure we can propagate &x + CST.  */
1633   if (lhs_code == SSA_NAME
1634       && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1635       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1636       && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1637     {
1638       tree op0 = gimple_assign_rhs1 (stmt);
1639       tree op1 = gimple_assign_rhs2 (stmt);
1640       tree new_rhs
1641 	= build_fold_addr_expr (fold_build2 (MEM_REF,
1642 					     TREE_TYPE (TREE_TYPE (op0)),
1643 					     unshare_expr (op0),
1644 					     fold_convert (ptr_type_node,
1645 							   op1)));
1646       if (dump_file && (dump_flags & TDF_DETAILS))
1647 	{
1648 	  fprintf (dump_file, "==== ASGN ");
1649 	  print_generic_expr (dump_file, lhs);
1650 	  fprintf (dump_file, " = ");
1651 	  print_generic_expr (dump_file, new_rhs);
1652 	  fprintf (dump_file, "\n");
1653 	}
1654 
1655       set_ssa_name_value (lhs, new_rhs);
1656     }
1657 
1658   /* A memory store, even an aliased store, creates a useful
1659      equivalence.  By exchanging the LHS and RHS, creating suitable
1660      vops and recording the result in the available expression table,
1661      we may be able to expose more redundant loads.  */
1662   if (!gimple_has_volatile_ops (stmt)
1663       && gimple_references_memory_p (stmt)
1664       && gimple_assign_single_p (stmt)
1665       && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1666 	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1667       && !is_gimple_reg (lhs))
1668     {
1669       tree rhs = gimple_assign_rhs1 (stmt);
1670       gassign *new_stmt;
1671 
1672       /* Build a new statement with the RHS and LHS exchanged.  */
1673       if (TREE_CODE (rhs) == SSA_NAME)
1674         {
1675           /* NOTE tuples.  The call to gimple_build_assign below replaced
1676              a call to build_gimple_modify_stmt, which did not set the
1677              SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
1678              may cause an SSA validation failure, as the LHS may be a
1679              default-initialized name and should have no definition.  I'm
1680              a bit dubious of this, as the artificial statement that we
1681              generate here may in fact be ill-formed, but it is simply
1682              used as an internal device in this pass, and never becomes
1683              part of the CFG.  */
1684 	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1685           new_stmt = gimple_build_assign (rhs, lhs);
1686           SSA_NAME_DEF_STMT (rhs) = defstmt;
1687         }
1688       else
1689         new_stmt = gimple_build_assign (rhs, lhs);
1690 
1691       gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1692 
1693       /* Finally enter the statement into the available expression
1694 	 table.  */
1695       avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
1696     }
1697 }
1698 
1699 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1700    CONST_AND_COPIES.  */
1701 
1702 static void
1703 cprop_operand (gimple *stmt, use_operand_p op_p)
1704 {
1705   tree val;
1706   tree op = USE_FROM_PTR (op_p);
1707 
1708   /* If the operand has a known constant value or it is known to be a
1709      copy of some other variable, use the value or copy stored in
1710      CONST_AND_COPIES.  */
1711   val = SSA_NAME_VALUE (op);
1712   if (val && val != op)
1713     {
1714       /* Do not replace hard register operands in asm statements.  */
1715       if (gimple_code (stmt) == GIMPLE_ASM
1716 	  && !may_propagate_copy_into_asm (op))
1717 	return;
1718 
1719       /* Certain operands are not allowed to be copy propagated due
1720 	 to their interaction with exception handling and some GCC
1721 	 extensions.  */
1722       if (!may_propagate_copy (op, val))
1723 	return;
1724 
1725       /* Do not propagate copies into BIVs.
1726          See PR23821 and PR62217 for how this can disturb IV and
1727 	 number of iteration analysis.  */
1728       if (TREE_CODE (val) != INTEGER_CST)
1729 	{
1730 	  gimple *def = SSA_NAME_DEF_STMT (op);
1731 	  if (gimple_code (def) == GIMPLE_PHI
1732 	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
1733 	    return;
1734 	}
1735 
1736       /* Dump details.  */
1737       if (dump_file && (dump_flags & TDF_DETAILS))
1738 	{
1739 	  fprintf (dump_file, "  Replaced '");
1740 	  print_generic_expr (dump_file, op, dump_flags);
1741 	  fprintf (dump_file, "' with %s '",
1742 		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1743 	  print_generic_expr (dump_file, val, dump_flags);
1744 	  fprintf (dump_file, "'\n");
1745 	}
1746 
1747       if (TREE_CODE (val) != SSA_NAME)
1748 	opt_stats.num_const_prop++;
1749       else
1750 	opt_stats.num_copy_prop++;
1751 
1752       propagate_value (op_p, val);
1753 
1754       /* And note that we modified this statement.  This is now
1755 	 safe, even if we changed virtual operands since we will
1756 	 rescan the statement and rewrite its operands again.  */
1757       gimple_set_modified (stmt, true);
1758     }
1759 }
1760 
1761 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1762    known value for that SSA_NAME (or NULL if no value is known).
1763 
1764    Propagate values from CONST_AND_COPIES into the uses, vuses and
1765    vdef_ops of STMT.  */
1766 
1767 static void
1768 cprop_into_stmt (gimple *stmt)
1769 {
1770   use_operand_p op_p;
1771   ssa_op_iter iter;
1772   tree last_copy_propagated_op = NULL;
1773 
1774   FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1775     {
1776       tree old_op = USE_FROM_PTR (op_p);
1777 
1778       /* If we have A = B and B = A in the copy propagation tables
1779 	 (due to an equality comparison), avoid substituting B for A
1780 	 then A for B in the trivially discovered cases.   This allows
1781 	 optimization of statements were A and B appear as input
1782 	 operands.  */
1783       if (old_op != last_copy_propagated_op)
1784 	{
1785 	  cprop_operand (stmt, op_p);
1786 
1787 	  tree new_op = USE_FROM_PTR (op_p);
1788 	  if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
1789 	    last_copy_propagated_op = new_op;
1790 	}
1791     }
1792 }
1793 
1794 /* If STMT contains a relational test, try to convert it into an
1795    equality test if there is only a single value which can ever
1796    make the test true.
1797 
1798    For example, if the expression hash table contains:
1799 
1800     TRUE = (i <= 1)
1801 
1802    And we have a test within statement of i >= 1, then we can safely
1803    rewrite the test as i == 1 since there only a single value where
1804    the test is true.
1805 
1806    This is similar to code in VRP.  */
1807 
1808 static void
1809 test_for_singularity (gimple *stmt, gcond *dummy_cond,
1810 		      avail_exprs_stack *avail_exprs_stack)
1811 {
1812   /* We want to support gimple conditionals as well as assignments
1813      where the RHS contains a conditional.  */
1814   if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
1815     {
1816       enum tree_code code = ERROR_MARK;
1817       tree lhs, rhs;
1818 
1819       /* Extract the condition of interest from both forms we support.  */
1820       if (is_gimple_assign (stmt))
1821 	{
1822 	  code = gimple_assign_rhs_code (stmt);
1823 	  lhs = gimple_assign_rhs1 (stmt);
1824 	  rhs = gimple_assign_rhs2 (stmt);
1825 	}
1826       else if (gimple_code (stmt) == GIMPLE_COND)
1827 	{
1828 	  code = gimple_cond_code (as_a <gcond *> (stmt));
1829 	  lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
1830 	  rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
1831 	}
1832 
1833       /* We're looking for a relational test using LE/GE.  Also note we can
1834 	 canonicalize LT/GT tests against constants into LE/GT tests.  */
1835       if (code == LE_EXPR || code == GE_EXPR
1836 	  || ((code == LT_EXPR || code == GT_EXPR)
1837 	       && TREE_CODE (rhs) == INTEGER_CST))
1838 	{
1839 	  /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR.  */
1840 	  if (code == LT_EXPR)
1841 	    rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
1842 			       rhs, build_int_cst (TREE_TYPE (rhs), 1));
1843 
1844 	  if (code == GT_EXPR)
1845 	    rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
1846 			       rhs, build_int_cst (TREE_TYPE (rhs), 1));
1847 
1848 	  /* Determine the code we want to check for in the hash table.  */
1849 	  enum tree_code test_code;
1850 	  if (code == GE_EXPR || code == GT_EXPR)
1851 	    test_code = LE_EXPR;
1852 	  else
1853 	    test_code = GE_EXPR;
1854 
1855 	  /* Update the dummy statement so we can query the hash tables.  */
1856 	  gimple_cond_set_code (dummy_cond, test_code);
1857 	  gimple_cond_set_lhs (dummy_cond, lhs);
1858 	  gimple_cond_set_rhs (dummy_cond, rhs);
1859 	  tree cached_lhs
1860 	    = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);
1861 
1862 	  /* If the lookup returned 1 (true), then the expression we
1863 	     queried was in the hash table.  As a result there is only
1864 	     one value that makes the original conditional true.  Update
1865 	     STMT accordingly.  */
1866 	  if (cached_lhs && integer_onep (cached_lhs))
1867 	    {
1868 	      if (is_gimple_assign (stmt))
1869 		{
1870 		  gimple_assign_set_rhs_code (stmt, EQ_EXPR);
1871 		  gimple_assign_set_rhs2 (stmt, rhs);
1872 		  gimple_set_modified (stmt, true);
1873 		}
1874 	      else
1875 		{
1876 		  gimple_set_modified (stmt, true);
1877 		  gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
1878 		  gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
1879 		  gimple_set_modified (stmt, true);
1880 		}
1881 	    }
1882 	}
1883     }
1884 }
1885 
1886 /* Optimize the statement in block BB pointed to by iterator SI.
1887 
1888    We try to perform some simplistic global redundancy elimination and
1889    constant propagation:
1890 
1891    1- To detect global redundancy, we keep track of expressions that have
1892       been computed in this block and its dominators.  If we find that the
1893       same expression is computed more than once, we eliminate repeated
1894       computations by using the target of the first one.
1895 
1896    2- Constant values and copy assignments.  This is used to do very
1897       simplistic constant and copy propagation.  When a constant or copy
1898       assignment is found, we map the value on the RHS of the assignment to
1899       the variable in the LHS in the CONST_AND_COPIES table.
1900 
1901    3- Very simple redundant store elimination is performed.
1902 
1903    4- We can simpify a condition to a constant or from a relational
1904       condition to an equality condition.  */
1905 
1906 edge
1907 dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator si)
1908 {
1909   gimple *stmt, *old_stmt;
1910   bool may_optimize_p;
1911   bool modified_p = false;
1912   bool was_noreturn;
1913   edge retval = NULL;
1914 
1915   old_stmt = stmt = gsi_stmt (si);
1916   was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
1917 
1918   if (dump_file && (dump_flags & TDF_DETAILS))
1919     {
1920       fprintf (dump_file, "Optimizing statement ");
1921       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1922     }
1923 
1924   update_stmt_if_modified (stmt);
1925   opt_stats.num_stmts++;
1926 
1927   /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
1928   cprop_into_stmt (stmt);
1929 
1930   /* If the statement has been modified with constant replacements,
1931      fold its RHS before checking for redundant computations.  */
1932   if (gimple_modified_p (stmt))
1933     {
1934       tree rhs = NULL;
1935 
1936       /* Try to fold the statement making sure that STMT is kept
1937 	 up to date.  */
1938       if (fold_stmt (&si))
1939 	{
1940 	  stmt = gsi_stmt (si);
1941 	  gimple_set_modified (stmt, true);
1942 
1943 	  if (dump_file && (dump_flags & TDF_DETAILS))
1944 	    {
1945 	      fprintf (dump_file, "  Folded to: ");
1946 	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1947 	    }
1948 	}
1949 
1950       /* We only need to consider cases that can yield a gimple operand.  */
1951       if (gimple_assign_single_p (stmt))
1952         rhs = gimple_assign_rhs1 (stmt);
1953       else if (gimple_code (stmt) == GIMPLE_GOTO)
1954         rhs = gimple_goto_dest (stmt);
1955       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1956         /* This should never be an ADDR_EXPR.  */
1957         rhs = gimple_switch_index (swtch_stmt);
1958 
1959       if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
1960         recompute_tree_invariant_for_addr_expr (rhs);
1961 
1962       /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
1963 	 even if fold_stmt updated the stmt already and thus cleared
1964 	 gimple_modified_p flag on it.  */
1965       modified_p = true;
1966     }
1967 
1968   /* Check for redundant computations.  Do this optimization only
1969      for assignments that have no volatile ops and conditionals.  */
1970   may_optimize_p = (!gimple_has_side_effects (stmt)
1971                     && (is_gimple_assign (stmt)
1972                         || (is_gimple_call (stmt)
1973                             && gimple_call_lhs (stmt) != NULL_TREE)
1974                         || gimple_code (stmt) == GIMPLE_COND
1975                         || gimple_code (stmt) == GIMPLE_SWITCH));
1976 
1977   if (may_optimize_p)
1978     {
1979       if (gimple_code (stmt) == GIMPLE_CALL)
1980 	{
1981 	  /* Resolve __builtin_constant_p.  If it hasn't been
1982 	     folded to integer_one_node by now, it's fairly
1983 	     certain that the value simply isn't constant.  */
1984 	  tree callee = gimple_call_fndecl (stmt);
1985 	  if (callee
1986 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
1987 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
1988 	    {
1989 	      propagate_tree_value_into_stmt (&si, integer_zero_node);
1990 	      stmt = gsi_stmt (si);
1991 	    }
1992 	}
1993 
1994       if (gimple_code (stmt) == GIMPLE_COND)
1995 	{
1996 	  tree lhs = gimple_cond_lhs (stmt);
1997 	  tree rhs = gimple_cond_rhs (stmt);
1998 
1999 	  /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
2000 	     then this conditional is computable at compile time.  We can just
2001 	     shove either 0 or 1 into the LHS, mark the statement as modified
2002 	     and all the right things will just happen below.
2003 
2004 	     Note this would apply to any case where LHS has a range
2005 	     narrower than its type implies and RHS is outside that
2006 	     narrower range.  Future work.  */
2007 	  if (TREE_CODE (lhs) == SSA_NAME
2008 	      && ssa_name_has_boolean_range (lhs)
2009 	      && TREE_CODE (rhs) == INTEGER_CST
2010 	      && ! (integer_zerop (rhs) || integer_onep (rhs)))
2011 	    {
2012 	      gimple_cond_set_lhs (as_a <gcond *> (stmt),
2013 				   fold_convert (TREE_TYPE (lhs),
2014 						 integer_zero_node));
2015 	      gimple_set_modified (stmt, true);
2016 	    }
2017 	  else if (TREE_CODE (lhs) == SSA_NAME)
2018 	    {
2019 	      /* Exploiting EVRP data is not yet fully integrated into DOM
2020 		 but we need to do something for this case to avoid regressing
2021 		 udr4.f90 and new1.C which have unexecutable blocks with
2022 		 undefined behavior that get diagnosed if they're left in the
2023 		 IL because we've attached range information to new
2024 		 SSA_NAMES.  */
2025 	      update_stmt_if_modified (stmt);
2026 	      edge taken_edge = NULL;
2027 	      evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
2028 						       &taken_edge);
2029 	      if (taken_edge)
2030 		{
2031 		  if (taken_edge->flags & EDGE_TRUE_VALUE)
2032 		    gimple_cond_make_true (as_a <gcond *> (stmt));
2033 		  else if (taken_edge->flags & EDGE_FALSE_VALUE)
2034 		    gimple_cond_make_false (as_a <gcond *> (stmt));
2035 		  else
2036 		    gcc_unreachable ();
2037 		  gimple_set_modified (stmt, true);
2038 		  update_stmt (stmt);
2039 		  cfg_altered = true;
2040 		  return taken_edge;
2041 		}
2042 	    }
2043 	}
2044 
2045       update_stmt_if_modified (stmt);
2046       eliminate_redundant_computations (&si, m_const_and_copies,
2047 					m_avail_exprs_stack);
2048       stmt = gsi_stmt (si);
2049 
2050       /* Perform simple redundant store elimination.  */
2051       if (gimple_assign_single_p (stmt)
2052 	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2053 	{
2054 	  tree lhs = gimple_assign_lhs (stmt);
2055 	  tree rhs = gimple_assign_rhs1 (stmt);
2056 	  tree cached_lhs;
2057 	  gassign *new_stmt;
2058 	  rhs = dom_valueize (rhs);
2059 	  /* Build a new statement with the RHS and LHS exchanged.  */
2060 	  if (TREE_CODE (rhs) == SSA_NAME)
2061 	    {
2062 	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
2063 	      new_stmt = gimple_build_assign (rhs, lhs);
2064 	      SSA_NAME_DEF_STMT (rhs) = defstmt;
2065 	    }
2066 	  else
2067 	    new_stmt = gimple_build_assign (rhs, lhs);
2068 	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2069 	  cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
2070 							       false);
2071 	  if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
2072 	    {
2073 	      basic_block bb = gimple_bb (stmt);
2074 	      unlink_stmt_vdef (stmt);
2075 	      if (gsi_remove (&si, true))
2076 		{
2077 		  bitmap_set_bit (need_eh_cleanup, bb->index);
2078 		  if (dump_file && (dump_flags & TDF_DETAILS))
2079 		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2080 		}
2081 	      release_defs (stmt);
2082 	      return retval;
2083 	    }
2084 	}
2085 
2086       /* If this statement was not redundant, we may still be able to simplify
2087 	 it, which may in turn allow other part of DOM or other passes to do
2088 	 a better job.  */
2089       test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
2090     }
2091 
2092   /* Record any additional equivalences created by this statement.  */
2093   if (is_gimple_assign (stmt))
2094     record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);
2095 
2096   /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
2097      know where it goes.  */
2098   if (gimple_modified_p (stmt) || modified_p)
2099     {
2100       tree val = NULL;
2101 
2102       if (gimple_code (stmt) == GIMPLE_COND)
2103         val = fold_binary_loc (gimple_location (stmt),
2104 			       gimple_cond_code (stmt), boolean_type_node,
2105 			       gimple_cond_lhs (stmt),
2106 			       gimple_cond_rhs (stmt));
2107       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2108 	val = gimple_switch_index (swtch_stmt);
2109 
2110       if (val && TREE_CODE (val) == INTEGER_CST)
2111 	{
2112 	  retval = find_taken_edge (bb, val);
2113 	  if (retval)
2114 	    {
2115 	      /* Fix the condition to be either true or false.  */
2116 	      if (gimple_code (stmt) == GIMPLE_COND)
2117 		{
2118 		  if (integer_zerop (val))
2119 		    gimple_cond_make_false (as_a <gcond *> (stmt));
2120 		  else if (integer_onep (val))
2121 		    gimple_cond_make_true (as_a <gcond *> (stmt));
2122 		  else
2123 		    gcc_unreachable ();
2124 
2125 		  gimple_set_modified (stmt, true);
2126 		}
2127 
2128 	      /* Further simplifications may be possible.  */
2129 	      cfg_altered = true;
2130 	    }
2131 	}
2132 
2133       update_stmt_if_modified (stmt);
2134 
2135       /* If we simplified a statement in such a way as to be shown that it
2136 	 cannot trap, update the eh information and the cfg to match.  */
2137       if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2138 	{
2139 	  bitmap_set_bit (need_eh_cleanup, bb->index);
2140 	  if (dump_file && (dump_flags & TDF_DETAILS))
2141 	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2142 	}
2143 
2144       if (!was_noreturn
2145 	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2146 	need_noreturn_fixup.safe_push (stmt);
2147     }
2148   return retval;
2149 }
2150