/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tree-inline.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"
#include "alloc-pool.h"
#include "tree-vrp.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* This file implements optimizations on the dominator tree.  */

/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures.  */
class edge_info
{
 public:
  typedef std::pair <tree, tree> equiv_pair;
  edge_info (edge);
  ~edge_info ();

  /* Record a simple LHS = RHS equivalence.  This may trigger
     calls to derive_equivalences.  */
  void record_simple_equiv (tree, tree);

  /* If traversing this edge creates simple equivalences, we store
     them as LHS/RHS pairs within this vector.  */
  vec<equiv_pair> simple_equivalences;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;

 private:
  /* Derive equivalences by walking the use-def chains.  */
  void derive_equivalences (tree, tree, int);
};

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
						    class const_and_copies *,
						    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
					      class const_and_copies *,
					      class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
					   class avail_exprs_stack *);
static void dump_dominator_optimization_stats (FILE *file,
					       hash_table<expr_elt_hasher> *);

/* Constructor for EDGE_INFO.  An EDGE_INFO instance is always
   associated with an edge E.  */

edge_info::edge_info (edge e)
{
  /* Free the old one associated with E if it exists, and
     associate our new object with E.  */
  free_dom_edge_info (e);
  e->aux = this;

  /* And initialize the embedded vectors.  */
  simple_equivalences = vNULL;
  cond_equivalences = vNULL;
}

/* Destructor just needs to release the vectors.  */

edge_info::~edge_info (void)
{
  this->cond_equivalences.release ();
  this->simple_equivalences.release ();
}

/* NAME is known to have the value VALUE, which must be a constant.

   Walk through its use-def chain to see if there are other equivalences
   we might be able to derive.

   RECURSION_LIMIT controls how far back we recurse through the use-def
   chains.  */

void
edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
{
  if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
    return;

  /* This records the equivalence for the toplevel object.  Do
     this before checking the recursion limit.  */
  simple_equivalences.safe_push (equiv_pair (name, value));

  /* Limit how far up the use-def chains we are willing to walk.  */
  if (recursion_limit == 0)
    return;

  /* We can walk up the use-def chains to potentially find more
     equivalences.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  if (is_gimple_assign (def_stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (def_stmt);
      switch (code)
	{
	/* We know the result of DEF_STMT was zero.  See if that allows
	   us to deduce anything about the SSA_NAMEs used on the RHS.  */
	case BIT_IOR_EXPR:
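	  /* E.g. if x_1 = a_2 | b_3 is known to be zero, then both a_2
	     and b_3 must be zero as well.  */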
	  if (integer_zerop (value))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      tree rhs2 = gimple_assign_rhs2 (def_stmt);

	      value = build_zero_cst (TREE_TYPE (rhs1));
	      derive_equivalences (rhs1, value, recursion_limit - 1);
	      value = build_zero_cst (TREE_TYPE (rhs2));
	      derive_equivalences (rhs2, value, recursion_limit - 1);
	    }
	  break;

	/* We know the result of DEF_STMT was nonzero.  See if that allows
	   us to deduce anything about the SSA_NAMEs used on the RHS.  */
	case BIT_AND_EXPR:
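	  /* E.g. if x_1 = a_2 & b_3 is known to be nonzero, then any
	     operand with a boolean range (say a_2) must be one.  */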
	  if (!integer_zerop (value))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      tree rhs2 = gimple_assign_rhs2 (def_stmt);

	      /* If either operand has a boolean range, then we
		 know its value must be one, otherwise we just know it
		 is nonzero.  The former is clearly useful; I haven't
		 seen cases where the latter is helpful yet.  */
	      if (TREE_CODE (rhs1) == SSA_NAME)
		{
		  if (ssa_name_has_boolean_range (rhs1))
		    {
		      value = build_one_cst (TREE_TYPE (rhs1));
		      derive_equivalences (rhs1, value, recursion_limit - 1);
		    }
		}
	      if (TREE_CODE (rhs2) == SSA_NAME)
		{
		  if (ssa_name_has_boolean_range (rhs2))
		    {
		      value = build_one_cst (TREE_TYPE (rhs2));
		      derive_equivalences (rhs2, value, recursion_limit - 1);
		    }
		}
	    }
	  break;

	/* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	   set via a widening type conversion, then we may be able to record
	   additional equivalences.  */
	case NOP_EXPR:
	case CONVERT_EXPR:
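	  /* E.g. if x_1 = (int) c_2 widens c_2 and x_1 is known to be 5,
	     then c_2 must be 5 too, provided 5 fits in c_2's type.  */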
	  {
	    tree rhs = gimple_assign_rhs1 (def_stmt);
	    tree rhs_type = TREE_TYPE (rhs);
	    if (INTEGRAL_TYPE_P (rhs_type)
		&& (TYPE_PRECISION (TREE_TYPE (name))
		    >= TYPE_PRECISION (rhs_type))
		&& int_fits_type_p (value, rhs_type))
	      derive_equivalences (rhs,
				   fold_convert (rhs_type, value),
				   recursion_limit - 1);
	    break;
	  }

	/* We can invert the operation of these codes trivially if
	   one of the RHS operands is a constant to produce a known
	   value for the other RHS operand.  */
	case POINTER_PLUS_EXPR:
	case PLUS_EXPR:
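	  /* E.g. if x_1 = a_2 + 7 is known to be 10, then a_2 must be 3.  */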
	  {
	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
	    tree rhs2 = gimple_assign_rhs2 (def_stmt);

	    /* If either argument is a constant, then we can compute
	       a constant value for the nonconstant argument.  */
	    if (TREE_CODE (rhs1) == INTEGER_CST
		&& TREE_CODE (rhs2) == SSA_NAME)
	      derive_equivalences (rhs2,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						value, rhs1),
				   recursion_limit - 1);
	    else if (TREE_CODE (rhs2) == INTEGER_CST
		     && TREE_CODE (rhs1) == SSA_NAME)
	      derive_equivalences (rhs1,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						value, rhs2),
				   recursion_limit - 1);
	    break;
	  }

	/* If one of the operands is a constant, then we can compute
	   the value of the other operand.  If both operands are
	   SSA_NAMEs, then they must be equal if the result is zero.  */
	case MINUS_EXPR:
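	  /* E.g. if x_1 = a_2 - 3 is known to be 7, then a_2 must be 10;
	     if x_1 = a_2 - b_3 is known to be zero, then a_2 == b_3.  */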
	  {
	    tree rhs1 = gimple_assign_rhs1 (def_stmt);
	    tree rhs2 = gimple_assign_rhs2 (def_stmt);

	    /* If either argument is a constant, then we can compute
	       a constant value for the nonconstant argument.  */
	    if (TREE_CODE (rhs1) == INTEGER_CST
		&& TREE_CODE (rhs2) == SSA_NAME)
	      derive_equivalences (rhs2,
				   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
						rhs1, value),
				   recursion_limit - 1);
	    else if (TREE_CODE (rhs2) == INTEGER_CST
		     && TREE_CODE (rhs1) == SSA_NAME)
	      derive_equivalences (rhs1,
				   fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
						value, rhs2),
				   recursion_limit - 1);
	    else if (integer_zerop (value))
	      {
		tree cond = build2 (EQ_EXPR, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		record_conditions (&this->cond_equivalences, cond, inverted);
	      }
	    break;
	  }

	case EQ_EXPR:
	case NE_EXPR:
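	  /* E.g. if x_1 = (a_2 == 9) is known to be one, then a_2 must
	     be 9 on this edge.  */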
	  {
	    if ((code == EQ_EXPR && integer_onep (value))
		|| (code == NE_EXPR && integer_zerop (value)))
	      {
		tree rhs1 = gimple_assign_rhs1 (def_stmt);
		tree rhs2 = gimple_assign_rhs2 (def_stmt);

		/* If either argument is a constant, then record the
		   other argument as being the same as that constant.

		   If neither operand is a constant, then we have a
		   conditional name == name equivalence.  */
		if (TREE_CODE (rhs1) == INTEGER_CST)
		  derive_equivalences (rhs2, rhs1, recursion_limit - 1);
		else if (TREE_CODE (rhs2) == INTEGER_CST)
		  derive_equivalences (rhs1, rhs2, recursion_limit - 1);
	      }
	    else
	      {
		tree cond = build2 (code, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		if (integer_zerop (value))
		  std::swap (cond, inverted);
		record_conditions (&this->cond_equivalences, cond, inverted);
	      }
	    break;
	  }

	/* For BIT_NOT and NEGATE, we can just apply the operation to the
	   VALUE to get the new equivalence.  It will always be a constant
	   so we can recurse.  */
	case BIT_NOT_EXPR:
	case NEGATE_EXPR:
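	  /* E.g. if x_1 = -a_2 is known to be 5, then a_2 must be -5.  */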
	  {
	    tree rhs = gimple_assign_rhs1 (def_stmt);
	    tree res = fold_build1 (code, TREE_TYPE (rhs), value);
	    derive_equivalences (rhs, res, recursion_limit - 1);
	    break;
	  }

	default:
	  {
	    if (TREE_CODE_CLASS (code) == tcc_comparison)
	      {
		tree cond = build2 (code, boolean_type_node,
				    gimple_assign_rhs1 (def_stmt),
				    gimple_assign_rhs2 (def_stmt));
		tree inverted = invert_truthvalue (cond);
		if (integer_zerop (value))
		  std::swap (cond, inverted);
		record_conditions (&this->cond_equivalences, cond, inverted);
		break;
	      }
	    break;
	  }
	}
    }
}

void
edge_info::record_simple_equiv (tree lhs, tree rhs)
{
  /* If the RHS is a constant, then we may be able to derive
     further equivalences.  Else just record the name = name
     equivalence.  */
  if (TREE_CODE (rhs) == INTEGER_CST)
    derive_equivalences (lhs, rhs, 4);
  else
    simple_equivalences.safe_push (equiv_pair (lhs, rhs));
}

/* Free the edge_info data attached to E, if it exists.  */

void
free_dom_edge_info (edge e)
{
  class edge_info *edge_info = (struct edge_info *)e->aux;

  if (edge_info)
    delete edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
	  free_dom_edge_info (e);
	  e->aux = NULL;
	}
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */
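
/* E.g. if BB ends with "if (x_1 == 5)", then on the true edge we can
   record the simple equivalence x_1 = 5 and on the false edge the
   conditional equivalence x_1 != 5.  Likewise, a switch edge reached
   by exactly one case label implies the index equals that case value.  */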

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  class edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
	  tree index = gimple_switch_index (switch_stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (switch_stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (switch_stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = new class edge_info (e);
		      edge_info->record_simple_equiv (index, x);
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.

	     However, don't do this if the constant isn't zero or one.
	     Such conditionals will get optimized more thoroughly during
	     the domwalk.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && ssa_name_has_boolean_range (op0)
	      && is_gimple_min_invariant (op1)
	      && (integer_zerop (op1) || integer_onep (op1)))
	    {
	      tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
	      tree false_val = constant_boolean_node (false, TREE_TYPE (op0));

	      if (code == EQ_EXPR)
		{
		  edge_info = new class edge_info (true_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? false_val : true_val));
		  edge_info = new class edge_info (false_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? true_val : false_val));
		}
	      else
		{
		  edge_info = new class edge_info (true_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? true_val : false_val));
		  edge_info = new class edge_info (false_edge);
		  edge_info->record_simple_equiv (op0,
						  (integer_zerop (op1)
						   ? false_val : true_val));
		}
	    }
	  /* This can show up in the IL as a result of copy propagation;
	     it will eventually be canonicalized, but we have to cope
	     with this case within the pass.  */
	  else if (is_gimple_min_invariant (op0)
		   && TREE_CODE (op1) == SSA_NAME)
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op0)
		    && real_zerop (op0));
	      struct edge_info *edge_info;

	      edge_info = new class edge_info (true_edge);
	      record_conditions (&edge_info->cond_equivalences, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		edge_info->record_simple_equiv (op1, op0);

	      edge_info = new class edge_info (false_edge);
	      record_conditions (&edge_info->cond_equivalences, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		edge_info->record_simple_equiv (op1, op0);
	    }

	  else if (TREE_CODE (op0) == SSA_NAME
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op1)
		    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
	      struct edge_info *edge_info;

	      edge_info = new class edge_info (true_edge);
	      record_conditions (&edge_info->cond_equivalences, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		edge_info->record_simple_equiv (op0, op1);

	      edge_info = new class edge_info (false_edge);
	      record_conditions (&edge_info->cond_equivalences, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		edge_info->record_simple_equiv (op0, op1);
	    }
	}
    }
}


class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction,
		      class const_and_copies *const_and_copies,
		      class avail_exprs_stack *avail_exprs_stack,
		      gcond *dummy_cond)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (dummy_cond) { }

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* Unwindable equivalences, both const/copy and expression varieties.  */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* VRP data.  */
  class evrp_range_analyzer evrp_range_analyzer;

  /* Dummy condition to avoid creating lots of throw away statements.  */
  gcond *m_dummy_cond;
  /* Optimize a single statement within a basic block using the
     various tables maintained by DOM.  Returns the taken edge if
     the statement is a conditional with a statically determined
     value.  */
  edge optimize_stmt (basic_block, gimple_stmt_iterator);
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

 private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time.  */
  bool may_peel_loop_headers_p;
}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.
     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
					 integer_zero_node, NULL, NULL);

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
			     avail_exprs_stack, dummy_cond);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  if (cfg_altered)
    {
      FOR_EACH_BB_FN (bb, fun)
	{
	  edge_iterator ei;
	  edge e;

	  /* First see if there are any edges without EDGE_EXECUTABLE
	     set.  */
	  bool found = false;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_EXECUTABLE) == 0)
		{
		  found = true;
		  break;
		}
	    }

	  /* If there were any such edges found, then remove jump threads
	     containing any edge leaving BB.  */
	  if (found)
	    FOR_EACH_EDGE (e, ei, bb->succs)
	      remove_jump_threads_including (e);
	}
    }

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags
		     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }
  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* A hack until we remove threading from tree-vrp.c and bring the
   simplification routine into the dom_opt_dom_walker class.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple *stmt,
				  gimple *within_stmt ATTRIBUTE_UNUSED,
				  class avail_exprs_stack *avail_exprs_stack,
				  basic_block bb ATTRIBUTE_UNUSED)
{
  /* First query our hash table to see if the expression is available
     there.  A non-NULL return value will be either a constant or another
     SSA_NAME.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs)
    return cached_lhs;

  /* If the hash table query failed, query VRP information.  This is
     essentially the same as tree-vrp's simplification routine.  The
     copy in tree-vrp is scheduled for removal in gcc-9.  */
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      cached_lhs
	= x_vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						 gimple_cond_lhs (cond_stmt),
						 gimple_cond_rhs (cond_stmt),
						 within_stmt);
      return cached_lhs;
    }

  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      value_range *vr = x_vr_values->get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return NULL_TREE;

      if (vr->type == VR_RANGE)
	{
	  size_t i, j;

	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
		     && tree_int_cst_equal (vr->min, vr->max)))
		return label;

	      if (i > j)
		return gimple_switch_label (switch_stmt, 0);
	    }
	}

      if (vr->type == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
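	  /* E.g. if the case labels span [0, 9] and the operand's
	     anti-range is ~[-5, 15], then no case label can match and
	     the default label must be taken.  */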
	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}
      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr = VR_INITIALIZER;
	  x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
						&dummy_tree, &new_vr);
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }
  return NULL;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E.  */
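
/* E.g. if we have just recorded a_1 = 0 for edge E, and a statement
   b_2 = a_1 + 3 dominates E->dest, then the equivalence b_2 = 3 also
   holds on edge E.  */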
static void
back_propagate_equivalences (tree lhs, edge e,
			     class const_and_copies *const_and_copies)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  bitmap domby = NULL;
  basic_block dest = e->dest;

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ???  If the code gets re-organized to a worklist to catch more
     indirect opportunities and it is made to handle PHIs then this
     should only consider use_stmts in basic-blocks we have already visited.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
	 This is cheaper than the dominator set tests below.  */
      if (dest == gimple_bb (use_stmt))
	continue;

      /* Filter out statements that can never produce a useful
	 equivalence.  */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
	continue;

      /* Profiling has shown the domination tests here can be fairly
	 expensive.  We get significant improvements by building the
	 set of blocks that dominate BB.  We can then just test
	 for set membership below.

	 We also initialize the set lazily since often the only uses
	 are going to be in the same block as DEST.  */
      if (!domby)
	{
	  domby = BITMAP_ALLOC (NULL);
	  basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
	  while (bb)
	    {
	      bitmap_set_bit (domby, bb->index);
	      bb = get_immediate_dominator (CDI_DOMINATORS, bb);
	    }
	}

      /* This tests if USE_STMT does not dominate DEST.  */
      if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
	continue;

      /* At this point USE_STMT dominates DEST and may result in a
	 useful equivalence.  Try to simplify its RHS to a constant
	 or SSA_NAME.  */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
						 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
	record_equality (lhs2, res, const_and_copies);
    }

  if (domby)
    BITMAP_FREE (domby);
}

/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
void
record_temporary_equivalences (edge e,
			       class const_and_copies *const_and_copies,
			       class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  class edge_info *edge_info = (class edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	avail_exprs_stack->record_cond (eq);

      edge_info::equiv_pair *seq;
      for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
	{
	  tree lhs = seq->first;
	  if (!lhs || TREE_CODE (lhs) != SSA_NAME)
	    continue;

	  /* Record the simple NAME = VALUE equivalence.  */
	  tree rhs = seq->second;

	  /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
	     cheaper to compute than the other, then set up the equivalence
	     such that we replace the expensive one with the cheap one.

	     If they are the same cost to compute, then do not record
	     anything.  */
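	  /* E.g. given a_1 = b_2 where b_2's defining statement is more
	     expensive than a_1's, we record b_2 = a_1 so that uses of the
	     costly name can be replaced by the cheap one.  */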
	  if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
	      int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);

	      gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
	      int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);

	      if (rhs_cost > lhs_cost)
		record_equality (rhs, lhs, const_and_copies);
	      else if (rhs_cost < lhs_cost)
		record_equality (lhs, rhs, const_and_copies);
	    }
	  else
	    record_equality (lhs, rhs, const_and_copies);

	  /* Any equivalence found for LHS may result in additional
	     equivalences for other uses of LHS that we have already
	     processed.  */
	  back_propagate_equivalences (lhs, e, const_and_copies);
	}
    }
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If the associated edge is not marked as executable, then it
	     can be ignored.  */
	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
	    continue;

	  t = dom_valueize (t);

	  /* If T is an SSA_NAME and its associated edge is a backedge,
	     then quit as we can not utilize this equivalence.  */
	  if (TREE_CODE (t) == SSA_NAME
	      && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
	    break;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}

/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
   incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb,
    class const_and_copies *const_and_copies,
    class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_pred_edge_ignoring_loop_edges (bb, true);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}

/* Dump SSA statistics on FILE.  */

static void
dump_dominator_optimization_stats (FILE *file,
				   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */
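/* E.g. from the true arm of "if (x_1 == y_2)" we may record x_1 = y_2 or
   y_2 = x_1; the swapping below picks the direction, preferring to
   propagate an invariant value and to replace a single-use name.  */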

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
	 X.  This will preserve its single use properly and if this
	 conditional is eliminated, the computation of X can be
	 eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
	std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_k)
   [...]
   i_j = i_{j-1}  for each j : 2 <= j <= k-1
   [...]
   i_k = i_{k-1} +/- ...  */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  while (gimple_code (phi) != GIMPLE_PHI)
    {
      /* Follow trivial copies, but not the DEF used in a back edge,
	 so that we don't prevent coalescing.  */
      if (!gimple_assign_ssa_name_copy_p (phi))
	return false;
      preinc = gimple_assign_rhs1 (phi);
      phi = SSA_NAME_DEF_STMT (preinc);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb,
			   class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;
      /* We may have an equivalence associated with this edge.  While
	 we can not propagate it into non-dominated blocks, we can
	 propagate it into PHIs in non-dominated blocks.  */
1356 
1357       /* Push the unwind marker so we can reset the const and copies
1358 	 table back to its original state after processing this edge.  */
1359       const_and_copies->push_marker ();
1360 
1361       /* Extract and record any simple NAME = VALUE equivalences.
1362 
1363 	 Don't bother with [01] = COND equivalences, they're not useful
1364 	 here.  */
1365       class edge_info *edge_info = (class edge_info *) e->aux;
1366 
1367       if (edge_info)
1368 	{
1369 	  edge_info::equiv_pair *seq;
1370 	  for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1371 	    {
1372 	      tree lhs = seq->first;
1373 	      tree rhs = seq->second;
1374 
1375 	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
1376 		const_and_copies->record_const_or_copy (lhs, rhs);
1377 	    }
1378 
1379 	}
1380 
1381       indx = e->dest_idx;
1382       for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1383 	{
1384 	  tree new_val;
1385 	  use_operand_p orig_p;
1386 	  tree orig_val;
1387           gphi *phi = gsi.phi ();
1388 
1389 	  /* The alternative may be associated with a constant, so verify
1390 	     it is an SSA_NAME before doing anything with it.  */
1391 	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1392 	  orig_val = get_use_from_ptr (orig_p);
1393 	  if (TREE_CODE (orig_val) != SSA_NAME)
1394 	    continue;
1395 
1396 	  /* If we have *ORIG_P in our constant/copy table, then replace
1397 	     ORIG_P with its value in our constant/copy table.  */
1398 	  new_val = SSA_NAME_VALUE (orig_val);
1399 	  if (new_val
1400 	      && new_val != orig_val
1401 	      && may_propagate_copy (orig_val, new_val))
1402 	    propagate_value (orig_p, new_val);
1403 	}
1404 
1405       const_and_copies->pop_to_marker ();
1406     }
1407 }
1408 
1409 edge
before_dom_children(basic_block bb)1410 dom_opt_dom_walker::before_dom_children (basic_block bb)
1411 {
1412   gimple_stmt_iterator gsi;
1413 
1414   if (dump_file && (dump_flags & TDF_DETAILS))
1415     fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1416 
1417   evrp_range_analyzer.enter (bb);
1418 
1419   /* Push a marker on the stacks of local information so that we know how
1420      far to unwind when we finalize this block.  */
1421   m_avail_exprs_stack->push_marker ();
1422   m_const_and_copies->push_marker ();
1423 
1424   record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1425 					  m_avail_exprs_stack);
1426 
1427   /* PHI nodes can create equivalences too.  */
1428   record_equivalences_from_phis (bb);
1429 
1430   /* Create equivalences from redundant PHIs.  PHIs are only truly
1431      redundant when they exist in the same block, so push another
1432      marker and unwind right afterwards.  */
1433   m_avail_exprs_stack->push_marker ();
1434   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1435     eliminate_redundant_computations (&gsi, m_const_and_copies,
1436 				      m_avail_exprs_stack);
1437   m_avail_exprs_stack->pop_to_marker ();
1438 
1439   edge taken_edge = NULL;
1440   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1441     {
1442       evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
1443       taken_edge = this->optimize_stmt (bb, gsi);
1444     }
1445 
1446   /* Now prepare to process dominated blocks.  */
1447   record_edge_info (bb);
1448   cprop_into_successor_phis (bb, m_const_and_copies);
1449   if (taken_edge && !dbg_cnt (dom_unreachable_edges))
1450     return NULL;
1451 
1452   return taken_edge;
1453 }
1454 
1455 /* We have finished processing the dominator children of BB, perform
1456    any finalization actions in preparation for leaving this node in
1457    the dominator tree.  */
1458 
1459 void
after_dom_children(basic_block bb)1460 dom_opt_dom_walker::after_dom_children (basic_block bb)
1461 {
1462   x_vr_values = evrp_range_analyzer.get_vr_values ();
1463   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
1464 			 m_avail_exprs_stack,
1465 			 &evrp_range_analyzer,
1466 			 simplify_stmt_for_jump_threading);
1467   x_vr_values = NULL;
1468 
1469   /* These remove expressions local to BB from the tables.  */
1470   m_avail_exprs_stack->pop_to_marker ();
1471   m_const_and_copies->pop_to_marker ();
1472   evrp_range_analyzer.leave (bb);
1473 }
1474 
1475 /* Search for redundant computations in STMT.  If any are found, then
1476    replace them with the variable holding the result of the computation.
1477 
1478    If safe, record this expression into AVAIL_EXPRS_STACK and
1479    CONST_AND_COPIES.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator *gsi,
				  class const_and_copies *const_and_copies,
				  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
	const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
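
/* For example (a sketch): a plain copy "x_1 = y_2" or a constant
   assignment "x_1 = 42" registers y_2 (resp. 42) as the known value of
   x_1, and a store "*p_1 = x_2" is entered into the expression table in
   reversed form so that a dominated load "y_3 = *p_1" can be rewritten
   as y_3 = x_2.  */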

static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
			       class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space.  */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  rhs = dom_valueize (rhs);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "==== ASGN ");
	      print_generic_expr (dump_file, lhs);
	      fprintf (dump_file, " = ");
	      print_generic_expr (dump_file, rhs);
	      fprintf (dump_file, "\n");
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* Make sure we can propagate &x + CST.  */
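  /* For example (a sketch): "p_1 = &a + 4" is re-expressed as an
     address, roughly p_1 == &MEM[&a + 4], which later propagation can
     use directly.  */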
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
	= build_fold_addr_expr (fold_build2 (MEM_REF,
					     TREE_TYPE (TREE_TYPE (op0)),
					     unshare_expr (op0),
					     fold_convert (ptr_type_node,
							   op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "==== ASGN ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, new_rhs);
	  fprintf (dump_file, "\n");
	}

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG.  */
	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
	 table.  */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */
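
/* For example (a sketch): if SSA_NAME_VALUE (b_2) is the constant 5,
   the use of b_2 in "x_3 = b_2 + 1" is replaced to yield
   "x_3 = 5 + 1", which later folding reduces to "x_3 = 6".  */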

static void
cprop_operand (gimple *stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (op))
	return;

      /* Certain operands are not allowed to be copy propagated due
	 to their interaction with exception handling and some GCC
	 extensions.  */
      if (!may_propagate_copy (op, val))
	return;

      /* Do not propagate copies into BIVs.
	 See PR23821 and PR62217 for how this can disturb IV and
	 number of iteration analysis.  */
      if (TREE_CODE (val) != INTEGER_CST)
	{
	  gimple *def = SSA_NAME_DEF_STMT (op);
	  if (gimple_code (def) == GIMPLE_PHI
	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
	    return;
	}

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced '");
	  print_generic_expr (dump_file, op, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, val, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      if (TREE_CODE (val) != SSA_NAME)
	opt_stats.num_const_prop++;
      else
	opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
	 safe, even if we changed virtual operands, since we will
	 rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */
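
/* For example (a sketch): after "if (a_1 == b_2)" the tables may record
   both a_1 == b_2 and b_2 == a_1.  In "c_3 = a_1 + b_2", substituting
   b_2 for a_1 and then a_1 for b_2 would merely swap the operands; the
   last_copy_propagated_op check below suppresses the second
   substitution.  */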

static void
cprop_into_stmt (gimple *stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;
  tree last_copy_propagated_op = NULL;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    {
      tree old_op = USE_FROM_PTR (op_p);

      /* If we have A = B and B = A in the copy propagation tables
	 (due to an equality comparison), avoid substituting B for A
	 then A for B in the trivially discovered cases.  This allows
	 optimization of statements where A and B appear as input
	 operands.  */
      if (old_op != last_copy_propagated_op)
	{
	  cprop_operand (stmt, op_p);

	  tree new_op = USE_FROM_PTR (op_p);
	  if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
	    last_copy_propagated_op = new_op;
	}
    }
}

/* If STMT contains a relational test, try to convert it into an
   equality test if there is only a single value which can ever
   make the test true.

   For example, if the expression hash table contains:

    TRUE = (i <= 1)

   and we have a test within a statement of i >= 1, then we can safely
   rewrite the test as i == 1 since there is only a single value for
   which the test is true.

   This is similar to code in VRP.  */

static void
test_for_singularity (gimple *stmt, gcond *dummy_cond,
		      avail_exprs_stack *avail_exprs_stack)
{
  /* We want to support gimple conditionals as well as assignments
     where the RHS contains a conditional.  */
  if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code code = ERROR_MARK;
      tree lhs, rhs;

      /* Extract the condition of interest from both forms we support.  */
      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  lhs = gimple_assign_rhs1 (stmt);
	  rhs = gimple_assign_rhs2 (stmt);
	}
      else if (gimple_code (stmt) == GIMPLE_COND)
	{
	  code = gimple_cond_code (as_a <gcond *> (stmt));
	  lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
	  rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
	}

      /* We're looking for a relational test using LE/GE.  Also note we can
	 canonicalize LT/GT tests against constants into LE/GE tests.  */
      if (code == LE_EXPR || code == GE_EXPR
	  || ((code == LT_EXPR || code == GT_EXPR)
	      && TREE_CODE (rhs) == INTEGER_CST))
	{
	  /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR.  */
	  if (code == LT_EXPR)
	    rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
			       rhs, build_int_cst (TREE_TYPE (rhs), 1));

	  if (code == GT_EXPR)
	    rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
			       rhs, build_int_cst (TREE_TYPE (rhs), 1));

	  /* Determine the code we want to check for in the hash table.  */
	  enum tree_code test_code;
	  if (code == GE_EXPR || code == GT_EXPR)
	    test_code = LE_EXPR;
	  else
	    test_code = GE_EXPR;

	  /* Update the dummy statement so we can query the hash tables.  */
	  gimple_cond_set_code (dummy_cond, test_code);
	  gimple_cond_set_lhs (dummy_cond, lhs);
	  gimple_cond_set_rhs (dummy_cond, rhs);
	  tree cached_lhs
	    = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);

	  /* If the lookup returned 1 (true), then the expression we
	     queried was in the hash table.  As a result there is only
	     one value that makes the original conditional true.  Update
	     STMT accordingly.  */
	  if (cached_lhs && integer_onep (cached_lhs))
	    {
	      if (is_gimple_assign (stmt))
		{
		  gimple_assign_set_rhs_code (stmt, EQ_EXPR);
		  gimple_assign_set_rhs2 (stmt, rhs);
		  gimple_set_modified (stmt, true);
		}
	      else
		{
		  gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
		  gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
		  gimple_set_modified (stmt, true);
		}
	    }
	}
    }
}

/* Optimize the statement in block BB pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.

   3- Very simple redundant store elimination is performed.

   4- We can simplify a condition to a constant, or rewrite a relational
      condition as an equality condition.  */
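
/* A sketch of the store elimination in #3 (illustrative): given

     *p_1 = x_2;
     ...
     *p_1 = x_2;

   with no intervening clobber of *p_1 or x_2, the second store writes
   a value the memory already holds and is removed.  */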

edge
dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date.  */
      if (fold_stmt (&si))
	{
	  stmt = gsi_stmt (si);
	  gimple_set_modified (stmt, true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  Folded to: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
	rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	/* This should never be an ADDR_EXPR.  */
	rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only for
     statements with no side effects: assignments, calls with an LHS,
     conditionals and switches.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
		    && (is_gimple_assign (stmt)
			|| (is_gimple_call (stmt)
			    && gimple_call_lhs (stmt) != NULL_TREE)
			|| gimple_code (stmt) == GIMPLE_COND
			|| gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant.  */
	  tree callee = gimple_call_fndecl (stmt);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
	    {
	      propagate_tree_value_into_stmt (&si, integer_zero_node);
	      stmt = gsi_stmt (si);
	    }
	}

      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  tree lhs = gimple_cond_lhs (stmt);
	  tree rhs = gimple_cond_rhs (stmt);

	  /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
	     then this conditional is computable at compile time.  We can just
	     shove either 0 or 1 into the LHS, mark the statement as modified
	     and all the right things will just happen below.

	     Note this would apply to any case where LHS has a range
	     narrower than its type implies and RHS is outside that
	     narrower range.  Future work.  */
	  if (TREE_CODE (lhs) == SSA_NAME
	      && ssa_name_has_boolean_range (lhs)
	      && TREE_CODE (rhs) == INTEGER_CST
	      && ! (integer_zerop (rhs) || integer_onep (rhs)))
	    {
	      gimple_cond_set_lhs (as_a <gcond *> (stmt),
				   fold_convert (TREE_TYPE (lhs),
						 integer_zero_node));
	      gimple_set_modified (stmt, true);
	    }
	  else if (TREE_CODE (lhs) == SSA_NAME)
	    {
	      /* Exploiting EVRP data is not yet fully integrated into DOM
		 but we need to do something for this case to avoid regressing
		 udr4.f90 and new1.C which have unexecutable blocks with
		 undefined behavior that get diagnosed if they're left in the
		 IL because we've attached range information to new
		 SSA_NAMES.  */
	      update_stmt_if_modified (stmt);
	      edge taken_edge = NULL;
	      evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
						       &taken_edge);
	      if (taken_edge)
		{
		  if (taken_edge->flags & EDGE_TRUE_VALUE)
		    gimple_cond_make_true (as_a <gcond *> (stmt));
		  else if (taken_edge->flags & EDGE_FALSE_VALUE)
		    gimple_cond_make_false (as_a <gcond *> (stmt));
		  else
		    gcc_unreachable ();
		  gimple_set_modified (stmt, true);
		  update_stmt (stmt);
		  cfg_altered = true;
		  return taken_edge;
		}
	    }
	}

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si, m_const_and_copies,
					m_avail_exprs_stack);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree cached_lhs;
	  gassign *new_stmt;
	  rhs = dom_valueize (rhs);
	  /* Build a new statement with the RHS and LHS exchanged.  */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
							       false);
	  if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
	    {
	      basic_block bb = gimple_bb (stmt);
	      unlink_stmt_vdef (stmt);
	      if (gsi_remove (&si, true))
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
		}
	      release_defs (stmt);
	      return retval;
	    }
	}

      /* If this statement was not redundant, we may still be able to simplify
	 it, which may in turn allow other parts of DOM or other passes to do
	 a better job.  */
      test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (stmt),
			       gimple_cond_code (stmt), boolean_type_node,
			       gimple_cond_lhs (stmt),
			       gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
	val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
	{
	  retval = find_taken_edge (bb, val);
	  if (retval)
	    {
	      /* Fix the condition to be either true or false.  */
	      if (gimple_code (stmt) == GIMPLE_COND)
		{
		  if (integer_zerop (val))
		    gimple_cond_make_false (as_a <gcond *> (stmt));
		  else if (integer_onep (val))
		    gimple_cond_make_true (as_a <gcond *> (stmt));
		  else
		    gcc_unreachable ();

		  gimple_set_modified (stmt, true);
		}

	      /* Further simplifications may be possible.  */
	      cfg_altered = true;
	    }
	}

      update_stmt_if_modified (stmt);

      /* If we simplified a statement in such a way as to be shown that it
	 cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
	}

      if (!was_noreturn
	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
	need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}