1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2021 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "cfgloop.h"
33 #include "gimple-fold.h"
34 #include "tree-eh.h"
35 #include "tree-inline.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-into-ssa.h"
39 #include "domwalk.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-ssa-threadupdate.h"
42 #include "tree-ssa-scopedtables.h"
43 #include "tree-ssa-threadedge.h"
44 #include "tree-ssa-dom.h"
45 #include "gimplify.h"
46 #include "tree-cfgcleanup.h"
47 #include "dbgcnt.h"
48 #include "alloc-pool.h"
49 #include "tree-vrp.h"
50 #include "vr-values.h"
51 #include "gimple-ssa-evrp-analyze.h"
52 #include "alias.h"
53
54 /* This file implements optimizations on the dominator tree. */
55
56 /* Structure for recording edge equivalences.
57
58 Computing and storing the edge equivalences instead of creating
59 them on-demand can save significant amounts of time, particularly
60 for pathological cases involving switch statements.
61
62 These structures live for a single iteration of the dominator
63 optimizer in the edge's AUX field. At the end of an iteration we
64 free each of these structures. */
65 class edge_info
66 {
67 public:
68 typedef std::pair <tree, tree> equiv_pair;
69 edge_info (edge);
70 ~edge_info ();
71
72 /* Record a simple LHS = RHS equivalence. This may trigger
73 calls to derive_equivalences. */
74 void record_simple_equiv (tree, tree);
75
76 /* If traversing this edge creates simple equivalences, we store
77 them as LHS/RHS pairs within this vector. */
78 vec<equiv_pair> simple_equivalences;
79
80 /* Traversing an edge may also indicate one or more particular conditions
81 are true or false. */
82 vec<cond_equivalence> cond_equivalences;
83
84 private:
85 /* Derive equivalences by walking the use-def chains. */
86 void derive_equivalences (tree, tree, int);
87 };
88
89 /* Track whether or not we have changed the control flow graph. */
90 static bool cfg_altered;
91
92 /* Bitmap of blocks that have had EH statements cleaned. We should
93 remove their dead edges eventually. */
94 static bitmap need_eh_cleanup;
95 static vec<gimple *> need_noreturn_fixup;
96
97 /* Statistics for dominator optimizations. */
98 struct opt_stats_d
99 {
100 long num_stmts;
101 long num_exprs_considered;
102 long num_re;
103 long num_const_prop;
104 long num_copy_prop;
105 };
106
107 static struct opt_stats_d opt_stats;
108
109 /* Local functions. */
110 static void record_equality (tree, tree, class const_and_copies *);
111 static void record_equivalences_from_phis (basic_block);
112 static void record_equivalences_from_incoming_edge (basic_block,
113 class const_and_copies *,
114 class avail_exprs_stack *);
115 static void eliminate_redundant_computations (gimple_stmt_iterator *,
116 class const_and_copies *,
117 class avail_exprs_stack *);
118 static void record_equivalences_from_stmt (gimple *, int,
119 class avail_exprs_stack *);
120 static void dump_dominator_optimization_stats (FILE *file,
121 hash_table<expr_elt_hasher> *);
122
123 /* Constructor for EDGE_INFO. An EDGE_INFO instance is always
124 associated with an edge E. */
125
edge_info(edge e)126 edge_info::edge_info (edge e)
127 {
128 /* Free the old one associated with E, if it exists and
129 associate our new object with E. */
130 free_dom_edge_info (e);
131 e->aux = this;
132
133 /* And initialize the embedded vectors. */
134 simple_equivalences = vNULL;
135 cond_equivalences = vNULL;
136 }
137
138 /* Destructor just needs to release the vectors. */
139
~edge_info(void)140 edge_info::~edge_info (void)
141 {
142 this->cond_equivalences.release ();
143 this->simple_equivalences.release ();
144 }
145
146 /* NAME is known to have the value VALUE, which must be a constant.
147
148 Walk through its use-def chain to see if there are other equivalences
149 we might be able to derive.
150
151 RECURSION_LIMIT controls how far back we recurse through the use-def
152 chains. */
153
154 void
derive_equivalences(tree name,tree value,int recursion_limit)155 edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
156 {
157 if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
158 return;
159
160 /* This records the equivalence for the toplevel object. Do
161 this before checking the recursion limit. */
162 simple_equivalences.safe_push (equiv_pair (name, value));
163
164 /* Limit how far up the use-def chains we are willing to walk. */
165 if (recursion_limit == 0)
166 return;
167
168 /* We can walk up the use-def chains to potentially find more
169 equivalences. */
170 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
171 if (is_gimple_assign (def_stmt))
172 {
173 enum tree_code code = gimple_assign_rhs_code (def_stmt);
174 switch (code)
175 {
176 /* If the result of an OR is zero, then its operands are, too. */
177 case BIT_IOR_EXPR:
178 if (integer_zerop (value))
179 {
180 tree rhs1 = gimple_assign_rhs1 (def_stmt);
181 tree rhs2 = gimple_assign_rhs2 (def_stmt);
182
183 value = build_zero_cst (TREE_TYPE (rhs1));
184 derive_equivalences (rhs1, value, recursion_limit - 1);
185 value = build_zero_cst (TREE_TYPE (rhs2));
186 derive_equivalences (rhs2, value, recursion_limit - 1);
187 }
188 break;
189
190 /* If the result of an AND is nonzero, then its operands are, too. */
191 case BIT_AND_EXPR:
192 if (!integer_zerop (value))
193 {
194 tree rhs1 = gimple_assign_rhs1 (def_stmt);
195 tree rhs2 = gimple_assign_rhs2 (def_stmt);
196
197 /* If either operand has a boolean range, then we
198 know its value must be one, otherwise we just know it
199 is nonzero. The former is clearly useful, I haven't
200 seen cases where the latter is helpful yet. */
201 if (TREE_CODE (rhs1) == SSA_NAME)
202 {
203 if (ssa_name_has_boolean_range (rhs1))
204 {
205 value = build_one_cst (TREE_TYPE (rhs1));
206 derive_equivalences (rhs1, value, recursion_limit - 1);
207 }
208 }
209 if (TREE_CODE (rhs2) == SSA_NAME)
210 {
211 if (ssa_name_has_boolean_range (rhs2))
212 {
213 value = build_one_cst (TREE_TYPE (rhs2));
214 derive_equivalences (rhs2, value, recursion_limit - 1);
215 }
216 }
217 }
218 break;
219
220 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
221 set via a widening type conversion, then we may be able to record
222 additional equivalences. */
223 case NOP_EXPR:
224 case CONVERT_EXPR:
225 {
226 tree rhs = gimple_assign_rhs1 (def_stmt);
227 tree rhs_type = TREE_TYPE (rhs);
228 if (INTEGRAL_TYPE_P (rhs_type)
229 && (TYPE_PRECISION (TREE_TYPE (name))
230 >= TYPE_PRECISION (rhs_type))
231 && int_fits_type_p (value, rhs_type))
232 derive_equivalences (rhs,
233 fold_convert (rhs_type, value),
234 recursion_limit - 1);
235 break;
236 }
237
238 /* We can invert the operation of these codes trivially if
239 one of the RHS operands is a constant to produce a known
240 value for the other RHS operand. */
241 case POINTER_PLUS_EXPR:
242 case PLUS_EXPR:
243 {
244 tree rhs1 = gimple_assign_rhs1 (def_stmt);
245 tree rhs2 = gimple_assign_rhs2 (def_stmt);
246
247 /* If either argument is a constant, then we can compute
248 a constant value for the nonconstant argument. */
249 if (TREE_CODE (rhs1) == INTEGER_CST
250 && TREE_CODE (rhs2) == SSA_NAME)
251 derive_equivalences (rhs2,
252 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
253 value, rhs1),
254 recursion_limit - 1);
255 else if (TREE_CODE (rhs2) == INTEGER_CST
256 && TREE_CODE (rhs1) == SSA_NAME)
257 derive_equivalences (rhs1,
258 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
259 value, rhs2),
260 recursion_limit - 1);
261 break;
262 }
263
264 /* If one of the operands is a constant, then we can compute
265 the value of the other operand. If both operands are
266 SSA_NAMEs, then they must be equal if the result is zero. */
267 case MINUS_EXPR:
268 {
269 tree rhs1 = gimple_assign_rhs1 (def_stmt);
270 tree rhs2 = gimple_assign_rhs2 (def_stmt);
271
272 /* If either argument is a constant, then we can compute
273 a constant value for the nonconstant argument. */
274 if (TREE_CODE (rhs1) == INTEGER_CST
275 && TREE_CODE (rhs2) == SSA_NAME)
276 derive_equivalences (rhs2,
277 fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
278 rhs1, value),
279 recursion_limit - 1);
280 else if (TREE_CODE (rhs2) == INTEGER_CST
281 && TREE_CODE (rhs1) == SSA_NAME)
282 derive_equivalences (rhs1,
283 fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
284 value, rhs2),
285 recursion_limit - 1);
286 else if (integer_zerop (value))
287 {
288 tree cond = build2 (EQ_EXPR, boolean_type_node,
289 gimple_assign_rhs1 (def_stmt),
290 gimple_assign_rhs2 (def_stmt));
291 tree inverted = invert_truthvalue (cond);
292 record_conditions (&this->cond_equivalences, cond, inverted);
293 }
294 break;
295 }
296
297 case EQ_EXPR:
298 case NE_EXPR:
299 {
300 if ((code == EQ_EXPR && integer_onep (value))
301 || (code == NE_EXPR && integer_zerop (value)))
302 {
303 tree rhs1 = gimple_assign_rhs1 (def_stmt);
304 tree rhs2 = gimple_assign_rhs2 (def_stmt);
305
306 /* If either argument is a constant, then record the
307 other argument as being the same as that constant.
308
309 If neither operand is a constant, then we have a
310 conditional name == name equivalence. */
311 if (TREE_CODE (rhs1) == INTEGER_CST)
312 derive_equivalences (rhs2, rhs1, recursion_limit - 1);
313 else if (TREE_CODE (rhs2) == INTEGER_CST)
314 derive_equivalences (rhs1, rhs2, recursion_limit - 1);
315 }
316 else
317 {
318 tree cond = build2 (code, boolean_type_node,
319 gimple_assign_rhs1 (def_stmt),
320 gimple_assign_rhs2 (def_stmt));
321 tree inverted = invert_truthvalue (cond);
322 if (integer_zerop (value))
323 std::swap (cond, inverted);
324 record_conditions (&this->cond_equivalences, cond, inverted);
325 }
326 break;
327 }
328
329 /* For BIT_NOT and NEGATE, we can just apply the operation to the
330 VALUE to get the new equivalence. It will always be a constant
331 so we can recurse. */
332 case BIT_NOT_EXPR:
333 case NEGATE_EXPR:
334 {
335 tree rhs = gimple_assign_rhs1 (def_stmt);
336 tree res;
337 /* If this is a NOT and the operand has a boolean range, then we
338 know its value must be zero or one. We are not supposed to
339 have a BIT_NOT_EXPR for boolean types with precision > 1 in
340 the general case, see e.g. the handling of TRUTH_NOT_EXPR in
341 the gimplifier, but it can be generated by match.pd out of
342 a BIT_XOR_EXPR wrapped in a BIT_AND_EXPR. Now the handling
343 of BIT_AND_EXPR above already forces a specific semantics for
344 boolean types with precision > 1 so we must do the same here,
345 otherwise we could change the semantics of TRUTH_NOT_EXPR for
346 boolean types with precision > 1. */
347 if (code == BIT_NOT_EXPR
348 && TREE_CODE (rhs) == SSA_NAME
349 && ssa_name_has_boolean_range (rhs))
350 {
351 if ((TREE_INT_CST_LOW (value) & 1) == 0)
352 res = build_one_cst (TREE_TYPE (rhs));
353 else
354 res = build_zero_cst (TREE_TYPE (rhs));
355 }
356 else
357 res = fold_build1 (code, TREE_TYPE (rhs), value);
358 derive_equivalences (rhs, res, recursion_limit - 1);
359 break;
360 }
361
362 default:
363 {
364 if (TREE_CODE_CLASS (code) == tcc_comparison)
365 {
366 tree cond = build2 (code, boolean_type_node,
367 gimple_assign_rhs1 (def_stmt),
368 gimple_assign_rhs2 (def_stmt));
369 tree inverted = invert_truthvalue (cond);
370 if (integer_zerop (value))
371 std::swap (cond, inverted);
372 record_conditions (&this->cond_equivalences, cond, inverted);
373 break;
374 }
375 break;
376 }
377 }
378 }
379 }
380
381 void
record_simple_equiv(tree lhs,tree rhs)382 edge_info::record_simple_equiv (tree lhs, tree rhs)
383 {
384 /* If the RHS is a constant, then we may be able to derive
385 further equivalences. Else just record the name = name
386 equivalence. */
387 if (TREE_CODE (rhs) == INTEGER_CST)
388 derive_equivalences (lhs, rhs, 4);
389 else
390 simple_equivalences.safe_push (equiv_pair (lhs, rhs));
391 }
392
393 /* Free the edge_info data attached to E, if it exists. */
394
395 void
free_dom_edge_info(edge e)396 free_dom_edge_info (edge e)
397 {
398 class edge_info *edge_info = (class edge_info *)e->aux;
399
400 if (edge_info)
401 delete edge_info;
402 }
403
404 /* Free all EDGE_INFO structures associated with edges in the CFG.
405 If a particular edge can be threaded, copy the redirection
406 target from the EDGE_INFO structure into the edge's AUX field
407 as required by code to update the CFG and SSA graph for
408 jump threading. */
409
410 static void
free_all_edge_infos(void)411 free_all_edge_infos (void)
412 {
413 basic_block bb;
414 edge_iterator ei;
415 edge e;
416
417 FOR_EACH_BB_FN (bb, cfun)
418 {
419 FOR_EACH_EDGE (e, ei, bb->preds)
420 {
421 free_dom_edge_info (e);
422 e->aux = NULL;
423 }
424 }
425 }
426
427 /* We have finished optimizing BB, record any information implied by
428 taking a specific outgoing edge from BB. */
429
430 static void
record_edge_info(basic_block bb)431 record_edge_info (basic_block bb)
432 {
433 gimple_stmt_iterator gsi = gsi_last_bb (bb);
434 class edge_info *edge_info;
435
436 if (! gsi_end_p (gsi))
437 {
438 gimple *stmt = gsi_stmt (gsi);
439 location_t loc = gimple_location (stmt);
440
441 if (gimple_code (stmt) == GIMPLE_SWITCH)
442 {
443 gswitch *switch_stmt = as_a <gswitch *> (stmt);
444 tree index = gimple_switch_index (switch_stmt);
445
446 if (TREE_CODE (index) == SSA_NAME)
447 {
448 int i;
449 int n_labels = gimple_switch_num_labels (switch_stmt);
450 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
451 edge e;
452 edge_iterator ei;
453
454 for (i = 0; i < n_labels; i++)
455 {
456 tree label = gimple_switch_label (switch_stmt, i);
457 basic_block target_bb
458 = label_to_block (cfun, CASE_LABEL (label));
459 if (CASE_HIGH (label)
460 || !CASE_LOW (label)
461 || info[target_bb->index])
462 info[target_bb->index] = error_mark_node;
463 else
464 info[target_bb->index] = label;
465 }
466
467 FOR_EACH_EDGE (e, ei, bb->succs)
468 {
469 basic_block target_bb = e->dest;
470 tree label = info[target_bb->index];
471
472 if (label != NULL && label != error_mark_node)
473 {
474 tree x = fold_convert_loc (loc, TREE_TYPE (index),
475 CASE_LOW (label));
476 edge_info = new class edge_info (e);
477 edge_info->record_simple_equiv (index, x);
478 }
479 }
480 free (info);
481 }
482 }
483
484 /* A COND_EXPR may create equivalences too. */
485 if (gimple_code (stmt) == GIMPLE_COND)
486 {
487 edge true_edge;
488 edge false_edge;
489
490 tree op0 = gimple_cond_lhs (stmt);
491 tree op1 = gimple_cond_rhs (stmt);
492 enum tree_code code = gimple_cond_code (stmt);
493
494 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
495
496 /* Special case comparing booleans against a constant as we
497 know the value of OP0 on both arms of the branch. i.e., we
498 can record an equivalence for OP0 rather than COND.
499
500 However, don't do this if the constant isn't zero or one.
501 Such conditionals will get optimized more thoroughly during
502 the domwalk. */
503 if ((code == EQ_EXPR || code == NE_EXPR)
504 && TREE_CODE (op0) == SSA_NAME
505 && ssa_name_has_boolean_range (op0)
506 && is_gimple_min_invariant (op1)
507 && (integer_zerop (op1) || integer_onep (op1)))
508 {
509 tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
510 tree false_val = constant_boolean_node (false, TREE_TYPE (op0));
511
512 if (code == EQ_EXPR)
513 {
514 edge_info = new class edge_info (true_edge);
515 edge_info->record_simple_equiv (op0,
516 (integer_zerop (op1)
517 ? false_val : true_val));
518 edge_info = new class edge_info (false_edge);
519 edge_info->record_simple_equiv (op0,
520 (integer_zerop (op1)
521 ? true_val : false_val));
522 }
523 else
524 {
525 edge_info = new class edge_info (true_edge);
526 edge_info->record_simple_equiv (op0,
527 (integer_zerop (op1)
528 ? true_val : false_val));
529 edge_info = new class edge_info (false_edge);
530 edge_info->record_simple_equiv (op0,
531 (integer_zerop (op1)
532 ? false_val : true_val));
533 }
534 }
535 /* This can show up in the IL as a result of copy propagation
536 it will eventually be canonicalized, but we have to cope
537 with this case within the pass. */
538 else if (is_gimple_min_invariant (op0)
539 && TREE_CODE (op1) == SSA_NAME)
540 {
541 tree cond = build2 (code, boolean_type_node, op0, op1);
542 tree inverted = invert_truthvalue_loc (loc, cond);
543 bool can_infer_simple_equiv
544 = !(HONOR_SIGNED_ZEROS (op0)
545 && real_zerop (op0));
546 class edge_info *edge_info;
547
548 edge_info = new class edge_info (true_edge);
549 record_conditions (&edge_info->cond_equivalences, cond, inverted);
550
551 if (can_infer_simple_equiv && code == EQ_EXPR)
552 edge_info->record_simple_equiv (op1, op0);
553
554 edge_info = new class edge_info (false_edge);
555 record_conditions (&edge_info->cond_equivalences, inverted, cond);
556
557 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
558 edge_info->record_simple_equiv (op1, op0);
559 }
560
561 else if (TREE_CODE (op0) == SSA_NAME
562 && (TREE_CODE (op1) == SSA_NAME
563 || is_gimple_min_invariant (op1)))
564 {
565 tree cond = build2 (code, boolean_type_node, op0, op1);
566 tree inverted = invert_truthvalue_loc (loc, cond);
567 bool can_infer_simple_equiv
568 = !(HONOR_SIGNED_ZEROS (op1)
569 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
570 class edge_info *edge_info;
571
572 edge_info = new class edge_info (true_edge);
573 record_conditions (&edge_info->cond_equivalences, cond, inverted);
574
575 if (can_infer_simple_equiv && code == EQ_EXPR)
576 edge_info->record_simple_equiv (op0, op1);
577
578 edge_info = new class edge_info (false_edge);
579 record_conditions (&edge_info->cond_equivalences, inverted, cond);
580
581 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
582 edge_info->record_simple_equiv (op0, op1);
583 }
584 }
585 }
586 }
587
588
589 class dom_opt_dom_walker : public dom_walker
590 {
591 public:
dom_opt_dom_walker(cdi_direction direction,class const_and_copies * const_and_copies,class avail_exprs_stack * avail_exprs_stack,gcond * dummy_cond)592 dom_opt_dom_walker (cdi_direction direction,
593 class const_and_copies *const_and_copies,
594 class avail_exprs_stack *avail_exprs_stack,
595 gcond *dummy_cond)
596 : dom_walker (direction, REACHABLE_BLOCKS),
597 m_const_and_copies (const_and_copies),
598 m_avail_exprs_stack (avail_exprs_stack),
599 evrp_range_analyzer (true),
600 m_dummy_cond (dummy_cond) { }
601
602 virtual edge before_dom_children (basic_block);
603 virtual void after_dom_children (basic_block);
604
605 private:
606
607 /* Unwindable equivalences, both const/copy and expression varieties. */
608 class const_and_copies *m_const_and_copies;
609 class avail_exprs_stack *m_avail_exprs_stack;
610
611 /* VRP data. */
612 class evrp_range_analyzer evrp_range_analyzer;
613
614 /* Dummy condition to avoid creating lots of throw away statements. */
615 gcond *m_dummy_cond;
616
617 /* Optimize a single statement within a basic block using the
618 various tables mantained by DOM. Returns the taken edge if
619 the statement is a conditional with a statically determined
620 value. */
621 edge optimize_stmt (basic_block, gimple_stmt_iterator *, bool *);
622 };
623
624 /* Jump threading, redundancy elimination and const/copy propagation.
625
626 This pass may expose new symbols that need to be renamed into SSA. For
627 every new symbol exposed, its corresponding bit will be set in
628 VARS_TO_RENAME. */
629
630 namespace {
631
632 const pass_data pass_data_dominator =
633 {
634 GIMPLE_PASS, /* type */
635 "dom", /* name */
636 OPTGROUP_NONE, /* optinfo_flags */
637 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
638 ( PROP_cfg | PROP_ssa ), /* properties_required */
639 0, /* properties_provided */
640 0, /* properties_destroyed */
641 0, /* todo_flags_start */
642 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
643 };
644
645 class pass_dominator : public gimple_opt_pass
646 {
647 public:
pass_dominator(gcc::context * ctxt)648 pass_dominator (gcc::context *ctxt)
649 : gimple_opt_pass (pass_data_dominator, ctxt),
650 may_peel_loop_headers_p (false)
651 {}
652
653 /* opt_pass methods: */
clone()654 opt_pass * clone () { return new pass_dominator (m_ctxt); }
set_pass_param(unsigned int n,bool param)655 void set_pass_param (unsigned int n, bool param)
656 {
657 gcc_assert (n == 0);
658 may_peel_loop_headers_p = param;
659 }
gate(function *)660 virtual bool gate (function *) { return flag_tree_dom != 0; }
661 virtual unsigned int execute (function *);
662
663 private:
664 /* This flag is used to prevent loops from being peeled repeatedly in jump
665 threading; it will be removed once we preserve loop structures throughout
666 the compilation -- we will be able to mark the affected loops directly in
667 jump threading, and avoid peeling them next time. */
668 bool may_peel_loop_headers_p;
669 }; // class pass_dominator
670
671 unsigned int
execute(function * fun)672 pass_dominator::execute (function *fun)
673 {
674 memset (&opt_stats, 0, sizeof (opt_stats));
675
676 /* Create our hash tables. */
677 hash_table<expr_elt_hasher> *avail_exprs
678 = new hash_table<expr_elt_hasher> (1024);
679 class avail_exprs_stack *avail_exprs_stack
680 = new class avail_exprs_stack (avail_exprs);
681 class const_and_copies *const_and_copies = new class const_and_copies ();
682 need_eh_cleanup = BITMAP_ALLOC (NULL);
683 need_noreturn_fixup.create (0);
684
685 calculate_dominance_info (CDI_DOMINATORS);
686 cfg_altered = false;
687
688 /* We need to know loop structures in order to avoid destroying them
689 in jump threading. Note that we still can e.g. thread through loop
690 headers to an exit edge, or through loop header to the loop body, assuming
691 that we update the loop info.
692
693 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
694 to several overly conservative bail-outs in jump threading, case
695 gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
696 missing. We should improve jump threading in future then
697 LOOPS_HAVE_PREHEADERS won't be needed here. */
698 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES
699 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
700
701 /* Initialize the value-handle array. */
702 threadedge_initialize_values ();
703
704 /* We need accurate information regarding back edges in the CFG
705 for jump threading; this may include back edges that are not part of
706 a single loop. */
707 mark_dfs_back_edges ();
708
709 /* We want to create the edge info structures before the dominator walk
710 so that they'll be in place for the jump threader, particularly when
711 threading through a join block.
712
713 The conditions will be lazily updated with global equivalences as
714 we reach them during the dominator walk. */
715 basic_block bb;
716 FOR_EACH_BB_FN (bb, fun)
717 record_edge_info (bb);
718
719 gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
720 integer_zero_node, NULL, NULL);
721
722 /* Recursively walk the dominator tree optimizing statements. */
723 dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
724 avail_exprs_stack, dummy_cond);
725 walker.walk (fun->cfg->x_entry_block_ptr);
726
727 /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
728 edge. When found, remove jump threads which contain any outgoing
729 edge from the affected block. */
730 if (cfg_altered)
731 {
732 FOR_EACH_BB_FN (bb, fun)
733 {
734 edge_iterator ei;
735 edge e;
736
737 /* First see if there are any edges without EDGE_EXECUTABLE
738 set. */
739 bool found = false;
740 FOR_EACH_EDGE (e, ei, bb->succs)
741 {
742 if ((e->flags & EDGE_EXECUTABLE) == 0)
743 {
744 found = true;
745 break;
746 }
747 }
748
749 /* If there were any such edges found, then remove jump threads
750 containing any edge leaving BB. */
751 if (found)
752 FOR_EACH_EDGE (e, ei, bb->succs)
753 remove_jump_threads_including (e);
754 }
755 }
756
757 {
758 gimple_stmt_iterator gsi;
759 basic_block bb;
760 FOR_EACH_BB_FN (bb, fun)
761 {
762 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
763 update_stmt_if_modified (gsi_stmt (gsi));
764 }
765 }
766
767 /* If we exposed any new variables, go ahead and put them into
768 SSA form now, before we handle jump threading. This simplifies
769 interactions between rewriting of _DECL nodes into SSA form
770 and rewriting SSA_NAME nodes into SSA form after block
771 duplication and CFG manipulation. */
772 update_ssa (TODO_update_ssa);
773
774 free_all_edge_infos ();
775
776 /* Thread jumps, creating duplicate blocks as needed. */
777 cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);
778
779 if (cfg_altered)
780 free_dominance_info (CDI_DOMINATORS);
781
782 /* Removal of statements may make some EH edges dead. Purge
783 such edges from the CFG as needed. */
784 if (!bitmap_empty_p (need_eh_cleanup))
785 {
786 unsigned i;
787 bitmap_iterator bi;
788
789 /* Jump threading may have created forwarder blocks from blocks
790 needing EH cleanup; the new successor of these blocks, which
791 has inherited from the original block, needs the cleanup.
792 Don't clear bits in the bitmap, as that can break the bitmap
793 iterator. */
794 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
795 {
796 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
797 if (bb == NULL)
798 continue;
799 while (single_succ_p (bb)
800 && (single_succ_edge (bb)->flags
801 & (EDGE_EH|EDGE_DFS_BACK)) == 0)
802 bb = single_succ (bb);
803 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
804 continue;
805 if ((unsigned) bb->index != i)
806 bitmap_set_bit (need_eh_cleanup, bb->index);
807 }
808
809 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
810 bitmap_clear (need_eh_cleanup);
811 }
812
813 /* Fixup stmts that became noreturn calls. This may require splitting
814 blocks and thus isn't possible during the dominator walk or before
815 jump threading finished. Do this in reverse order so we don't
816 inadvertedly remove a stmt we want to fixup by visiting a dominating
817 now noreturn call first. */
818 while (!need_noreturn_fixup.is_empty ())
819 {
820 gimple *stmt = need_noreturn_fixup.pop ();
821 if (dump_file && dump_flags & TDF_DETAILS)
822 {
823 fprintf (dump_file, "Fixing up noreturn call ");
824 print_gimple_stmt (dump_file, stmt, 0);
825 fprintf (dump_file, "\n");
826 }
827 fixup_noreturn_call (stmt);
828 }
829
830 statistics_counter_event (fun, "Redundant expressions eliminated",
831 opt_stats.num_re);
832 statistics_counter_event (fun, "Constants propagated",
833 opt_stats.num_const_prop);
834 statistics_counter_event (fun, "Copies propagated",
835 opt_stats.num_copy_prop);
836
837 /* Debugging dumps. */
838 if (dump_file && (dump_flags & TDF_STATS))
839 dump_dominator_optimization_stats (dump_file, avail_exprs);
840
841 loop_optimizer_finalize ();
842
843 /* Delete our main hashtable. */
844 delete avail_exprs;
845 avail_exprs = NULL;
846
847 /* Free asserted bitmaps and stacks. */
848 BITMAP_FREE (need_eh_cleanup);
849 need_noreturn_fixup.release ();
850 delete avail_exprs_stack;
851 delete const_and_copies;
852
853 /* Free the value-handle array. */
854 threadedge_finalize_values ();
855
856 return 0;
857 }
858
859 } // anon namespace
860
861 gimple_opt_pass *
make_pass_dominator(gcc::context * ctxt)862 make_pass_dominator (gcc::context *ctxt)
863 {
864 return new pass_dominator (ctxt);
865 }
866
867 /* A hack until we remove threading from tree-vrp.c and bring the
868 simplification routine into the dom_opt_dom_walker class. */
869 static class vr_values *x_vr_values;
870
871 /* A trivial wrapper so that we can present the generic jump
872 threading code with a simple API for simplifying statements.
873
874 ?? This should be cleaned up. There's a virtually identical copy
875 of this function in tree-vrp.c. */
876
877 static tree
simplify_stmt_for_jump_threading(gimple * stmt,gimple * within_stmt ATTRIBUTE_UNUSED,class avail_exprs_stack * avail_exprs_stack,basic_block bb ATTRIBUTE_UNUSED)878 simplify_stmt_for_jump_threading (gimple *stmt,
879 gimple *within_stmt ATTRIBUTE_UNUSED,
880 class avail_exprs_stack *avail_exprs_stack,
881 basic_block bb ATTRIBUTE_UNUSED)
882 {
883 /* First query our hash table to see if the expression is available
884 there. A non-NULL return value will be either a constant or another
885 SSA_NAME. */
886 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
887 if (cached_lhs)
888 return cached_lhs;
889
890 /* If the hash table query failed, query VRP information. This is
891 essentially the same as tree-vrp's simplification routine. The
892 copy in tree-vrp is scheduled for removal in gcc-9. */
893 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
894 {
895 simplify_using_ranges simplifier (x_vr_values);
896 return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
897 gimple_cond_lhs (cond_stmt),
898 gimple_cond_rhs (cond_stmt),
899 within_stmt);
900 }
901
902 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
903 {
904 tree op = gimple_switch_index (switch_stmt);
905 if (TREE_CODE (op) != SSA_NAME)
906 return NULL_TREE;
907
908 const value_range_equiv *vr = x_vr_values->get_value_range (op);
909 return find_case_label_range (switch_stmt, vr);
910 }
911
912 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
913 {
914 tree lhs = gimple_assign_lhs (assign_stmt);
915 if (TREE_CODE (lhs) == SSA_NAME
916 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
917 || POINTER_TYPE_P (TREE_TYPE (lhs)))
918 && stmt_interesting_for_vrp (stmt))
919 {
920 edge dummy_e;
921 tree dummy_tree;
922 value_range_equiv new_vr;
923 x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
924 &dummy_tree, &new_vr);
925 tree singleton;
926 if (new_vr.singleton_p (&singleton))
927 return singleton;
928 }
929 }
930 return NULL;
931 }
932
933 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
934
935 static tree
dom_valueize(tree t)936 dom_valueize (tree t)
937 {
938 if (TREE_CODE (t) == SSA_NAME)
939 {
940 tree tem = SSA_NAME_VALUE (t);
941 if (tem)
942 return tem;
943 }
944 return t;
945 }
946
947 /* We have just found an equivalence for LHS on an edge E.
948 Look backwards to other uses of LHS and see if we can derive
949 additional equivalences that are valid on edge E. */
950 static void
back_propagate_equivalences(tree lhs,edge e,class const_and_copies * const_and_copies)951 back_propagate_equivalences (tree lhs, edge e,
952 class const_and_copies *const_and_copies)
953 {
954 use_operand_p use_p;
955 imm_use_iterator iter;
956 bitmap domby = NULL;
957 basic_block dest = e->dest;
958
959 /* Iterate over the uses of LHS to see if any dominate E->dest.
960 If so, they may create useful equivalences too.
961
962 ??? If the code gets re-organized to a worklist to catch more
963 indirect opportunities and it is made to handle PHIs then this
964 should only consider use_stmts in basic-blocks we have already visited. */
965 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
966 {
967 gimple *use_stmt = USE_STMT (use_p);
968
969 /* Often the use is in DEST, which we trivially know we can't use.
970 This is cheaper than the dominator set tests below. */
971 if (dest == gimple_bb (use_stmt))
972 continue;
973
974 /* Filter out statements that can never produce a useful
975 equivalence. */
976 tree lhs2 = gimple_get_lhs (use_stmt);
977 if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
978 continue;
979
980 /* Profiling has shown the domination tests here can be fairly
981 expensive. We get significant improvements by building the
982 set of blocks that dominate BB. We can then just test
983 for set membership below.
984
985 We also initialize the set lazily since often the only uses
986 are going to be in the same block as DEST. */
987 if (!domby)
988 {
989 domby = BITMAP_ALLOC (NULL);
990 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
991 while (bb)
992 {
993 bitmap_set_bit (domby, bb->index);
994 bb = get_immediate_dominator (CDI_DOMINATORS, bb);
995 }
996 }
997
998 /* This tests if USE_STMT does not dominate DEST. */
999 if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
1000 continue;
1001
1002 /* At this point USE_STMT dominates DEST and may result in a
1003 useful equivalence. Try to simplify its RHS to a constant
1004 or SSA_NAME. */
1005 tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
1006 no_follow_ssa_edges);
1007 if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
1008 record_equality (lhs2, res, const_and_copies);
1009 }
1010
1011 if (domby)
1012 BITMAP_FREE (domby);
1013 }
1014
1015 /* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
1016 by traversing edge E (which are cached in E->aux).
1017
1018 Callers are responsible for managing the unwinding markers. */
1019 void
record_temporary_equivalences(edge e,class const_and_copies * const_and_copies,class avail_exprs_stack * avail_exprs_stack)1020 record_temporary_equivalences (edge e,
1021 class const_and_copies *const_and_copies,
1022 class avail_exprs_stack *avail_exprs_stack)
1023 {
1024 int i;
1025 class edge_info *edge_info = (class edge_info *) e->aux;
1026
1027 /* If we have info associated with this edge, record it into
1028 our equivalence tables. */
1029 if (edge_info)
1030 {
1031 cond_equivalence *eq;
1032 /* If we have 0 = COND or 1 = COND equivalences, record them
1033 into our expression hash tables. */
1034 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1035 avail_exprs_stack->record_cond (eq);
1036
1037 edge_info::equiv_pair *seq;
1038 for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1039 {
1040 tree lhs = seq->first;
1041 if (!lhs || TREE_CODE (lhs) != SSA_NAME)
1042 continue;
1043
1044 /* Record the simple NAME = VALUE equivalence. */
1045 tree rhs = seq->second;
1046
1047 /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
1048 cheaper to compute than the other, then set up the equivalence
1049 such that we replace the expensive one with the cheap one.
1050
1051 If they are the same cost to compute, then do not record
1052 anything. */
1053 if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
1054 {
1055 gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
1056 int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);
1057
1058 gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
1059 int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);
1060
1061 if (rhs_cost > lhs_cost)
1062 record_equality (rhs, lhs, const_and_copies);
1063 else if (rhs_cost < lhs_cost)
1064 record_equality (lhs, rhs, const_and_copies);
1065 }
1066 else
1067 record_equality (lhs, rhs, const_and_copies);
1068
1069
1070 /* Any equivalence found for LHS may result in additional
1071 equivalences for other uses of LHS that we have already
1072 processed. */
1073 back_propagate_equivalences (lhs, e, const_and_copies);
1074 }
1075 }
1076 }
1077
1078 /* PHI nodes can create equivalences too.
1079
1080 Ignoring any alternatives which are the same as the result, if
1081 all the alternatives are equal, then the PHI node creates an
1082 equivalence. */
1083
1084 static void
record_equivalences_from_phis(basic_block bb)1085 record_equivalences_from_phis (basic_block bb)
1086 {
1087 gphi_iterator gsi;
1088
1089 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
1090 {
1091 gphi *phi = gsi.phi ();
1092
1093 /* We might eliminate the PHI, so advance GSI now. */
1094 gsi_next (&gsi);
1095
1096 tree lhs = gimple_phi_result (phi);
1097 tree rhs = NULL;
1098 size_t i;
1099
1100 for (i = 0; i < gimple_phi_num_args (phi); i++)
1101 {
1102 tree t = gimple_phi_arg_def (phi, i);
1103
1104 /* Ignore alternatives which are the same as our LHS. Since
1105 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
1106 can simply compare pointers. */
1107 if (lhs == t)
1108 continue;
1109
1110 /* If the associated edge is not marked as executable, then it
1111 can be ignored. */
1112 if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
1113 continue;
1114
1115 t = dom_valueize (t);
1116
1117 /* If T is an SSA_NAME and its associated edge is a backedge,
1118 then quit as we cannot utilize this equivalence. */
1119 if (TREE_CODE (t) == SSA_NAME
1120 && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
1121 break;
1122
1123 /* If we have not processed an alternative yet, then set
1124 RHS to this alternative. */
1125 if (rhs == NULL)
1126 rhs = t;
1127 /* If we have processed an alternative (stored in RHS), then
1128 see if it is equal to this one. If it isn't, then stop
1129 the search. */
1130 else if (! operand_equal_for_phi_arg_p (rhs, t))
1131 break;
1132 }
1133
1134 /* If we had no interesting alternatives, then all the RHS alternatives
1135 must have been the same as LHS. */
1136 if (!rhs)
1137 rhs = lhs;
1138
1139 /* If we managed to iterate through each PHI alternative without
1140 breaking out of the loop, then we have a PHI which may create
1141 a useful equivalence. We do not need to record unwind data for
1142 this, since this is a true assignment and not an equivalence
1143 inferred from a comparison. All uses of this ssa name are dominated
1144 by this assignment, so unwinding just costs time and space. */
1145 if (i == gimple_phi_num_args (phi))
1146 {
1147 if (may_propagate_copy (lhs, rhs))
1148 set_ssa_name_value (lhs, rhs);
1149 else if (virtual_operand_p (lhs))
1150 {
1151 gimple *use_stmt;
1152 imm_use_iterator iter;
1153 use_operand_p use_p;
1154 /* For virtual operands we have to propagate into all uses as
1155 otherwise we will create overlapping life-ranges. */
1156 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
1157 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
1158 SET_USE (use_p, rhs);
1159 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
1160 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
1161 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (phi);
1162 remove_phi_node (&tmp_gsi, true);
1163 }
1164 }
1165 }
1166 }
1167
1168 /* Record any equivalences created by the incoming edge to BB into
1169 CONST_AND_COPIES and AVAIL_EXPRS_STACK. If BB has more than one
1170 incoming edge, then no equivalence is created. */
1171
1172 static void
record_equivalences_from_incoming_edge(basic_block bb,class const_and_copies * const_and_copies,class avail_exprs_stack * avail_exprs_stack)1173 record_equivalences_from_incoming_edge (basic_block bb,
1174 class const_and_copies *const_and_copies,
1175 class avail_exprs_stack *avail_exprs_stack)
1176 {
1177 edge e;
1178 basic_block parent;
1179
1180 /* If our parent block ended with a control statement, then we may be
1181 able to record some equivalences based on which outgoing edge from
1182 the parent was followed. */
1183 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1184
1185 e = single_pred_edge_ignoring_loop_edges (bb, true);
1186
1187 /* If we had a single incoming edge from our parent block, then enter
1188 any data associated with the edge into our tables. */
1189 if (e && e->src == parent)
1190 record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
1191 }
1192
1193 /* Dump statistics for the hash table HTAB. */
1194
1195 static void
htab_statistics(FILE * file,const hash_table<expr_elt_hasher> & htab)1196 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1197 {
1198 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1199 (long) htab.size (),
1200 (long) htab.elements (),
1201 htab.collisions ());
1202 }
1203
1204 /* Dump SSA statistics on FILE. */
1205
1206 static void
dump_dominator_optimization_stats(FILE * file,hash_table<expr_elt_hasher> * avail_exprs)1207 dump_dominator_optimization_stats (FILE *file,
1208 hash_table<expr_elt_hasher> *avail_exprs)
1209 {
1210 fprintf (file, "Total number of statements: %6ld\n\n",
1211 opt_stats.num_stmts);
1212 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1213 opt_stats.num_exprs_considered);
1214
1215 fprintf (file, "\nHash table statistics:\n");
1216
1217 fprintf (file, " avail_exprs: ");
1218 htab_statistics (file, *avail_exprs);
1219 }
1220
1221
1222 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1223 This constrains the cases in which we may treat this as assignment. */
1224
1225 static void
record_equality(tree x,tree y,class const_and_copies * const_and_copies)1226 record_equality (tree x, tree y, class const_and_copies *const_and_copies)
1227 {
1228 tree prev_x = NULL, prev_y = NULL;
1229
1230 if (tree_swap_operands_p (x, y))
1231 std::swap (x, y);
1232
1233 /* Most of the time tree_swap_operands_p does what we want. But there
1234 are cases where we know one operand is better for copy propagation than
1235 the other. Given no other code cares about ordering of equality
1236 comparison operators for that purpose, we just handle the special cases
1237 here. */
1238 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1239 {
1240 /* If one operand is a single use operand, then make it
1241 X. This will preserve its single use properly and if this
1242 conditional is eliminated, the computation of X can be
1243 eliminated as well. */
1244 if (has_single_use (y) && ! has_single_use (x))
1245 std::swap (x, y);
1246 }
1247 if (TREE_CODE (x) == SSA_NAME)
1248 prev_x = SSA_NAME_VALUE (x);
1249 if (TREE_CODE (y) == SSA_NAME)
1250 prev_y = SSA_NAME_VALUE (y);
1251
1252 /* If one of the previous values is invariant, or invariant in more loops
1253 (by depth), then use that.
1254 Otherwise it doesn't matter which value we choose, just so
1255 long as we canonicalize on one value. */
1256 if (is_gimple_min_invariant (y))
1257 ;
1258 else if (is_gimple_min_invariant (x))
1259 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1260 else if (prev_x && is_gimple_min_invariant (prev_x))
1261 x = y, y = prev_x, prev_x = prev_y;
1262 else if (prev_y)
1263 y = prev_y;
1264
1265 /* After the swapping, we must have one SSA_NAME. */
1266 if (TREE_CODE (x) != SSA_NAME)
1267 return;
1268
1269 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1270 variable compared against zero. If we're honoring signed zeros,
1271 then we cannot record this value unless we know that the value is
1272 nonzero. */
1273 if (HONOR_SIGNED_ZEROS (x)
1274 && (TREE_CODE (y) != REAL_CST
1275 || real_equal (&dconst0, &TREE_REAL_CST (y))))
1276 return;
1277
1278 const_and_copies->record_const_or_copy (x, y, prev_x);
1279 }
1280
1281 /* Returns true when STMT is a simple iv increment. It detects the
1282 following situation:
1283
1284 i_1 = phi (..., i_k)
1285 [...]
1286 i_j = i_{j-1} for each j : 2 <= j <= k-1
1287 [...]
1288 i_k = i_{k-1} +/- ... */
1289
1290 bool
simple_iv_increment_p(gimple * stmt)1291 simple_iv_increment_p (gimple *stmt)
1292 {
1293 enum tree_code code;
1294 tree lhs, preinc;
1295 gimple *phi;
1296 size_t i;
1297
1298 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1299 return false;
1300
1301 lhs = gimple_assign_lhs (stmt);
1302 if (TREE_CODE (lhs) != SSA_NAME)
1303 return false;
1304
1305 code = gimple_assign_rhs_code (stmt);
1306 if (code != PLUS_EXPR
1307 && code != MINUS_EXPR
1308 && code != POINTER_PLUS_EXPR)
1309 return false;
1310
1311 preinc = gimple_assign_rhs1 (stmt);
1312 if (TREE_CODE (preinc) != SSA_NAME)
1313 return false;
1314
1315 phi = SSA_NAME_DEF_STMT (preinc);
1316 while (gimple_code (phi) != GIMPLE_PHI)
1317 {
1318 /* Follow trivial copies, but not the DEF used in a back edge,
1319 so that we don't prevent coalescing. */
1320 if (!gimple_assign_ssa_name_copy_p (phi))
1321 return false;
1322 preinc = gimple_assign_rhs1 (phi);
1323 phi = SSA_NAME_DEF_STMT (preinc);
1324 }
1325
1326 for (i = 0; i < gimple_phi_num_args (phi); i++)
1327 if (gimple_phi_arg_def (phi, i) == lhs)
1328 return true;
1329
1330 return false;
1331 }
1332
1333 /* Propagate know values from SSA_NAME_VALUE into the PHI nodes of the
1334 successors of BB. */
1335
1336 static void
cprop_into_successor_phis(basic_block bb,class const_and_copies * const_and_copies)1337 cprop_into_successor_phis (basic_block bb,
1338 class const_and_copies *const_and_copies)
1339 {
1340 edge e;
1341 edge_iterator ei;
1342
1343 FOR_EACH_EDGE (e, ei, bb->succs)
1344 {
1345 int indx;
1346 gphi_iterator gsi;
1347
1348 /* If this is an abnormal edge, then we do not want to copy propagate
1349 into the PHI alternative associated with this edge. */
1350 if (e->flags & EDGE_ABNORMAL)
1351 continue;
1352
1353 gsi = gsi_start_phis (e->dest);
1354 if (gsi_end_p (gsi))
1355 continue;
1356
1357 /* We may have an equivalence associated with this edge. While
1358 we cannot propagate it into non-dominated blocks, we can
1359 propagate them into PHIs in non-dominated blocks. */
1360
1361 /* Push the unwind marker so we can reset the const and copies
1362 table back to its original state after processing this edge. */
1363 const_and_copies->push_marker ();
1364
1365 /* Extract and record any simple NAME = VALUE equivalences.
1366
1367 Don't bother with [01] = COND equivalences, they're not useful
1368 here. */
1369 class edge_info *edge_info = (class edge_info *) e->aux;
1370
1371 if (edge_info)
1372 {
1373 edge_info::equiv_pair *seq;
1374 for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
1375 {
1376 tree lhs = seq->first;
1377 tree rhs = seq->second;
1378
1379 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1380 const_and_copies->record_const_or_copy (lhs, rhs);
1381 }
1382
1383 }
1384
1385 indx = e->dest_idx;
1386 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1387 {
1388 tree new_val;
1389 use_operand_p orig_p;
1390 tree orig_val;
1391 gphi *phi = gsi.phi ();
1392
1393 /* The alternative may be associated with a constant, so verify
1394 it is an SSA_NAME before doing anything with it. */
1395 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1396 orig_val = get_use_from_ptr (orig_p);
1397 if (TREE_CODE (orig_val) != SSA_NAME)
1398 continue;
1399
1400 /* If we have *ORIG_P in our constant/copy table, then replace
1401 ORIG_P with its value in our constant/copy table. */
1402 new_val = SSA_NAME_VALUE (orig_val);
1403 if (new_val
1404 && new_val != orig_val
1405 && may_propagate_copy (orig_val, new_val))
1406 propagate_value (orig_p, new_val);
1407 }
1408
1409 const_and_copies->pop_to_marker ();
1410 }
1411 }
1412
1413 edge
before_dom_children(basic_block bb)1414 dom_opt_dom_walker::before_dom_children (basic_block bb)
1415 {
1416 gimple_stmt_iterator gsi;
1417
1418 if (dump_file && (dump_flags & TDF_DETAILS))
1419 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1420
1421 evrp_range_analyzer.enter (bb);
1422
1423 /* Push a marker on the stacks of local information so that we know how
1424 far to unwind when we finalize this block. */
1425 m_avail_exprs_stack->push_marker ();
1426 m_const_and_copies->push_marker ();
1427
1428 record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1429 m_avail_exprs_stack);
1430
1431 /* PHI nodes can create equivalences too. */
1432 record_equivalences_from_phis (bb);
1433
1434 /* Create equivalences from redundant PHIs. PHIs are only truly
1435 redundant when they exist in the same block, so push another
1436 marker and unwind right afterwards. */
1437 m_avail_exprs_stack->push_marker ();
1438 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1439 eliminate_redundant_computations (&gsi, m_const_and_copies,
1440 m_avail_exprs_stack);
1441 m_avail_exprs_stack->pop_to_marker ();
1442
1443 edge taken_edge = NULL;
1444 /* Initialize visited flag ahead of us, it has undefined state on
1445 pass entry. */
1446 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1447 gimple_set_visited (gsi_stmt (gsi), false);
1448 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
1449 {
1450 /* Do not optimize a stmt twice, substitution might end up with
1451 _3 = _3 which is not valid. */
1452 if (gimple_visited_p (gsi_stmt (gsi)))
1453 {
1454 gsi_next (&gsi);
1455 continue;
1456 }
1457
1458 /* Compute range information and optimize the stmt. */
1459 evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
1460 bool removed_p = false;
1461 taken_edge = this->optimize_stmt (bb, &gsi, &removed_p);
1462 if (!removed_p)
1463 gimple_set_visited (gsi_stmt (gsi), true);
1464
1465 /* Go back and visit stmts inserted by folding after substituting
1466 into the stmt at gsi. */
1467 if (gsi_end_p (gsi))
1468 {
1469 gcc_checking_assert (removed_p);
1470 gsi = gsi_last_bb (bb);
1471 while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)))
1472 gsi_prev (&gsi);
1473 }
1474 else
1475 {
1476 do
1477 {
1478 gsi_prev (&gsi);
1479 }
1480 while (!gsi_end_p (gsi) && !gimple_visited_p (gsi_stmt (gsi)));
1481 }
1482 if (gsi_end_p (gsi))
1483 gsi = gsi_start_bb (bb);
1484 else
1485 gsi_next (&gsi);
1486 }
1487
1488 /* Now prepare to process dominated blocks. */
1489 record_edge_info (bb);
1490 cprop_into_successor_phis (bb, m_const_and_copies);
1491 if (taken_edge && !dbg_cnt (dom_unreachable_edges))
1492 return NULL;
1493
1494 return taken_edge;
1495 }
1496
1497 /* We have finished processing the dominator children of BB, perform
1498 any finalization actions in preparation for leaving this node in
1499 the dominator tree. */
1500
1501 void
after_dom_children(basic_block bb)1502 dom_opt_dom_walker::after_dom_children (basic_block bb)
1503 {
1504 x_vr_values = &evrp_range_analyzer;
1505 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
1506 m_avail_exprs_stack,
1507 &evrp_range_analyzer,
1508 simplify_stmt_for_jump_threading);
1509 x_vr_values = NULL;
1510
1511 /* These remove expressions local to BB from the tables. */
1512 m_avail_exprs_stack->pop_to_marker ();
1513 m_const_and_copies->pop_to_marker ();
1514 evrp_range_analyzer.leave (bb);
1515 }
1516
1517 /* Search for redundant computations in STMT. If any are found, then
1518 replace them with the variable holding the result of the computation.
1519
1520 If safe, record this expression into AVAIL_EXPRS_STACK and
1521 CONST_AND_COPIES. */
1522
1523 static void
eliminate_redundant_computations(gimple_stmt_iterator * gsi,class const_and_copies * const_and_copies,class avail_exprs_stack * avail_exprs_stack)1524 eliminate_redundant_computations (gimple_stmt_iterator* gsi,
1525 class const_and_copies *const_and_copies,
1526 class avail_exprs_stack *avail_exprs_stack)
1527 {
1528 tree expr_type;
1529 tree cached_lhs;
1530 tree def;
1531 bool insert = true;
1532 bool assigns_var_p = false;
1533
1534 gimple *stmt = gsi_stmt (*gsi);
1535
1536 if (gimple_code (stmt) == GIMPLE_PHI)
1537 def = gimple_phi_result (stmt);
1538 else
1539 def = gimple_get_lhs (stmt);
1540
1541 /* Certain expressions on the RHS can be optimized away, but cannot
1542 themselves be entered into the hash tables. */
1543 if (! def
1544 || TREE_CODE (def) != SSA_NAME
1545 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1546 || gimple_vdef (stmt)
1547 /* Do not record equivalences for increments of ivs. This would create
1548 overlapping live ranges for a very questionable gain. */
1549 || simple_iv_increment_p (stmt))
1550 insert = false;
1551
1552 /* Check if the expression has been computed before. */
1553 cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);
1554
1555 opt_stats.num_exprs_considered++;
1556
1557 /* Get the type of the expression we are trying to optimize. */
1558 if (is_gimple_assign (stmt))
1559 {
1560 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1561 assigns_var_p = true;
1562 }
1563 else if (gimple_code (stmt) == GIMPLE_COND)
1564 expr_type = boolean_type_node;
1565 else if (is_gimple_call (stmt))
1566 {
1567 gcc_assert (gimple_call_lhs (stmt));
1568 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1569 assigns_var_p = true;
1570 }
1571 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1572 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1573 else if (gimple_code (stmt) == GIMPLE_PHI)
1574 /* We can't propagate into a phi, so the logic below doesn't apply.
1575 Instead record an equivalence between the cached LHS and the
1576 PHI result of this statement, provided they are in the same block.
1577 This should be sufficient to kill the redundant phi. */
1578 {
1579 if (def && cached_lhs)
1580 const_and_copies->record_const_or_copy (def, cached_lhs);
1581 return;
1582 }
1583 else
1584 gcc_unreachable ();
1585
1586 if (!cached_lhs)
1587 return;
1588
1589 /* It is safe to ignore types here since we have already done
1590 type checking in the hashing and equality routines. In fact
1591 type checking here merely gets in the way of constant
1592 propagation. Also, make sure that it is safe to propagate
1593 CACHED_LHS into the expression in STMT. */
1594 if ((TREE_CODE (cached_lhs) != SSA_NAME
1595 && (assigns_var_p
1596 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1597 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1598 {
1599 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1600 || is_gimple_min_invariant (cached_lhs));
1601
1602 if (dump_file && (dump_flags & TDF_DETAILS))
1603 {
1604 fprintf (dump_file, " Replaced redundant expr '");
1605 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1606 fprintf (dump_file, "' with '");
1607 print_generic_expr (dump_file, cached_lhs, dump_flags);
1608 fprintf (dump_file, "'\n");
1609 }
1610
1611 opt_stats.num_re++;
1612
1613 if (assigns_var_p
1614 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1615 cached_lhs = fold_convert (expr_type, cached_lhs);
1616
1617 propagate_tree_value_into_stmt (gsi, cached_lhs);
1618
1619 /* Since it is always necessary to mark the result as modified,
1620 perhaps we should move this into propagate_tree_value_into_stmt
1621 itself. */
1622 gimple_set_modified (gsi_stmt (*gsi), true);
1623 }
1624 }
1625
1626 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1627 the available expressions table or the const_and_copies table.
1628 Detect and record those equivalences into AVAIL_EXPRS_STACK.
1629
1630 We handle only very simple copy equivalences here. The heavy
1631 lifing is done by eliminate_redundant_computations. */
1632
1633 static void
record_equivalences_from_stmt(gimple * stmt,int may_optimize_p,class avail_exprs_stack * avail_exprs_stack)1634 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1635 class avail_exprs_stack *avail_exprs_stack)
1636 {
1637 tree lhs;
1638 enum tree_code lhs_code;
1639
1640 gcc_assert (is_gimple_assign (stmt));
1641
1642 lhs = gimple_assign_lhs (stmt);
1643 lhs_code = TREE_CODE (lhs);
1644
1645 if (lhs_code == SSA_NAME
1646 && gimple_assign_single_p (stmt))
1647 {
1648 tree rhs = gimple_assign_rhs1 (stmt);
1649
1650 /* If the RHS of the assignment is a constant or another variable that
1651 may be propagated, register it in the CONST_AND_COPIES table. We
1652 do not need to record unwind data for this, since this is a true
1653 assignment and not an equivalence inferred from a comparison. All
1654 uses of this ssa name are dominated by this assignment, so unwinding
1655 just costs time and space. */
1656 if (may_optimize_p
1657 && (TREE_CODE (rhs) == SSA_NAME
1658 || is_gimple_min_invariant (rhs)))
1659 {
1660 rhs = dom_valueize (rhs);
1661
1662 if (dump_file && (dump_flags & TDF_DETAILS))
1663 {
1664 fprintf (dump_file, "==== ASGN ");
1665 print_generic_expr (dump_file, lhs);
1666 fprintf (dump_file, " = ");
1667 print_generic_expr (dump_file, rhs);
1668 fprintf (dump_file, "\n");
1669 }
1670
1671 set_ssa_name_value (lhs, rhs);
1672 }
1673 }
1674
1675 /* Make sure we can propagate &x + CST. */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
        = build1 (ADDR_EXPR, TREE_TYPE (op0),
                  fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (op0)),
                               unshare_expr (op0), fold_convert (ptr_type_node,
                                                                 op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "==== ASGN ");
          print_generic_expr (dump_file, lhs);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, new_rhs);
          fprintf (dump_file, "\n");
        }

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence. By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads. */
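  /* For illustration (not from this file), in

       int f (int *p, int x) { *p = x; return *p; }

     recording the artificial load "x = *p" after the store lets the
     load in the return statement be found redundant and replaced
     with x. */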
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged. */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples. The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition. I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG. */
          gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table. */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES. */

static void
cprop_operand (gimple *stmt, use_operand_p op_p, vr_values *vr_values)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES. */
  val = SSA_NAME_VALUE (op);
  if (!val)
    val = vr_values->op_with_constant_singleton_value_range (op);

  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements. */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions. */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into BIVs.
         See PR23821 and PR62217 for how this can disturb IV and
         number of iteration analysis. */
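      /* A BIV here is an SSA name defined by a PHI in a loop header,
         e.g. (illustrative) "i_1 = PHI <0(preheader), i_2(latch)>";
         replacing uses of i_1 with some equivalent name would hide
         the induction variable from later analyses. */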
      if (TREE_CODE (val) != INTEGER_CST)
        {
          gimple *def = SSA_NAME_DEF_STMT (op);
          if (gimple_code (def) == GIMPLE_PHI
              && gimple_bb (def)->loop_father->header == gimple_bb (def))
            return;
        }

      /* Dump details. */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement. This is now
         safe, even if we changed virtual operands, since we will
         rescan the statement and rewrite its operands again. */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT. */

static void
cprop_into_stmt (gimple *stmt, vr_values *vr_values)
{
  use_operand_p op_p;
  ssa_op_iter iter;
  tree last_copy_propagated_op = NULL;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    {
      tree old_op = USE_FROM_PTR (op_p);

      /* If we have A = B and B = A in the copy propagation tables
         (due to an equality comparison), avoid substituting B for A
         then A for B in the trivially discovered cases. This allows
         optimization of statements where A and B appear as input
         operands. */
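      /* E.g. (sketch): after "if (a_1 == b_2)" both a_1 -> b_2 and
         b_2 -> a_1 may be recorded on the true edge; without this
         guard, in a statement like "c_3 = a_1 + b_2" the first
         operand would become b_2 and the second would bounce back
         to a_1, so neither name is canonicalized. */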
      if (old_op != last_copy_propagated_op)
        {
          cprop_operand (stmt, op_p, vr_values);

          tree new_op = USE_FROM_PTR (op_p);
          if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
            last_copy_propagated_op = new_op;
        }
    }
}

/* If STMT contains a relational test, try to convert it into an
   equality test if there is only a single value which can ever
   make the test true.

   For example, if the expression hash table contains:

    TRUE = (i <= 1)

   and STMT contains the test i >= 1, then we can safely rewrite
   the test as i == 1, since there is only a single value for which
   the test is true.

   This is similar to code in VRP. */
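/* Concretely (an illustrative sketch): with "i_1 <= 1" recorded as
   true in the expression hash table, the condition "if (i_1 >= 1)"
   is rewritten below to "if (i_1 == 1)", which in turn lets DOM
   record the equivalence i_1 == 1 on the true edge. */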

static void
test_for_singularity (gimple *stmt, gcond *dummy_cond,
                      avail_exprs_stack *avail_exprs_stack)
{
  /* We want to support gimple conditionals as well as assignments
     where the RHS contains a conditional. */
  if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code code = ERROR_MARK;
      tree lhs, rhs;

      /* Extract the condition of interest from both forms we support. */
      if (is_gimple_assign (stmt))
        {
          code = gimple_assign_rhs_code (stmt);
          lhs = gimple_assign_rhs1 (stmt);
          rhs = gimple_assign_rhs2 (stmt);
        }
      else if (gimple_code (stmt) == GIMPLE_COND)
        {
          code = gimple_cond_code (as_a <gcond *> (stmt));
          lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
          rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
        }

      /* We're looking for a relational test using LE/GE. Also note we can
         canonicalize LT/GT tests against constants into LE/GE tests. */
      if (code == LE_EXPR || code == GE_EXPR
          || ((code == LT_EXPR || code == GT_EXPR)
              && TREE_CODE (rhs) == INTEGER_CST))
        {
          /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR. */
          if (code == LT_EXPR)
            rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
                               rhs, build_int_cst (TREE_TYPE (rhs), 1));

          if (code == GT_EXPR)
            rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
                               rhs, build_int_cst (TREE_TYPE (rhs), 1));
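          /* E.g. (sketch): "i_1 < 5" is treated as "i_1 <= 4" and
             "i_1 > 5" as "i_1 >= 6", so only LE/GE forms need to be
             queried against the hash table below. */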

          /* Determine the code we want to check for in the hash table. */
          enum tree_code test_code;
          if (code == GE_EXPR || code == GT_EXPR)
            test_code = LE_EXPR;
          else
            test_code = GE_EXPR;

          /* Update the dummy statement so we can query the hash tables. */
          gimple_cond_set_code (dummy_cond, test_code);
          gimple_cond_set_lhs (dummy_cond, lhs);
          gimple_cond_set_rhs (dummy_cond, rhs);
          tree cached_lhs
            = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);

          /* If the lookup returned 1 (true), then the expression we
             queried was in the hash table. As a result there is only
             one value that makes the original conditional true. Update
             STMT accordingly. */
          if (cached_lhs && integer_onep (cached_lhs))
            {
              if (is_gimple_assign (stmt))
                {
                  gimple_assign_set_rhs_code (stmt, EQ_EXPR);
                  gimple_assign_set_rhs2 (stmt, rhs);
                  gimple_set_modified (stmt, true);
                }
              else
                {
                  gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
                  gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
                  gimple_set_modified (stmt, true);
                }
            }
        }
    }
}

/* Optimize the statement in block BB pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators. If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments. This is used to do very
      simplistic constant and copy propagation. When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.

   3- Very simple redundant store elimination is performed.

   4- We can simplify a condition to a constant or from a relational
      condition to an equality condition. */
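/* As a small end-to-end sketch (illustrative source, not from a dump):
   in

     int g (int a, int b) { int x = a + b; int y = a + b; return y; }

   item 1 replaces the second computation of "a + b" with x, and
   item 2 then allows the return to use x directly. */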

edge
dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator *si,
                                   bool *removed_p)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (*si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
  cprop_into_stmt (stmt, &evrp_range_analyzer);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations. */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date. */
      if (fold_stmt (si))
        {
          stmt = gsi_stmt (*si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand. */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        /* This should never be an ADDR_EXPR. */
        rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared the
         gimple_modified_p flag on it. */
      modified_p = true;
    }

  /* Check for redundant computations. Do this optimization only
     for side-effect-free assignments, calls with a LHS, conditionals
     and switches. */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p. If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant. */
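          /* E.g. (sketch): if "_1 = __builtin_constant_p (n_2)" has
             survived to this point, n_2 was never proven constant, so
             we substitute 0 and let code guarded by _1 fold away. */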
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && fndecl_built_in_p (callee, BUILT_IN_CONSTANT_P))
            {
              propagate_tree_value_into_stmt (si, integer_zero_node);
              stmt = gsi_stmt (*si);
            }
        }

      if (gimple_code (stmt) == GIMPLE_COND)
        {
          tree lhs = gimple_cond_lhs (stmt);
          tree rhs = gimple_cond_rhs (stmt);

          /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
             then this conditional is computable at compile time. We can just
             shove either 0 or 1 into the LHS, mark the statement as modified
             and all the right things will just happen below.

             Note this would apply to any case where LHS has a range
             narrower than its type implies and RHS is outside that
             narrower range. Future work. */
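          /* E.g. (sketch): if _1 is known to be 0 or 1, the test
             "if (_1 == 2)" can never be true; replacing the LHS with
             0 yields "if (0 == 2)", which folds to false below. */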
          if (TREE_CODE (lhs) == SSA_NAME
              && ssa_name_has_boolean_range (lhs)
              && TREE_CODE (rhs) == INTEGER_CST
              && ! (integer_zerop (rhs) || integer_onep (rhs)))
            {
              gimple_cond_set_lhs (as_a <gcond *> (stmt),
                                   fold_convert (TREE_TYPE (lhs),
                                                 integer_zero_node));
              gimple_set_modified (stmt, true);
            }
          else if (TREE_CODE (lhs) == SSA_NAME)
            {
              /* Exploiting EVRP data is not yet fully integrated into DOM
                 but we need to do something for this case to avoid regressing
                 udr4.f90 and new1.C which have unexecutable blocks with
                 undefined behavior that get diagnosed if they're left in the
                 IL because we've attached range information to new
                 SSA_NAMES. */
              update_stmt_if_modified (stmt);
              edge taken_edge = NULL;
              evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
                                                       &taken_edge);
              if (taken_edge)
                {
                  if (taken_edge->flags & EDGE_TRUE_VALUE)
                    gimple_cond_make_true (as_a <gcond *> (stmt));
                  else if (taken_edge->flags & EDGE_FALSE_VALUE)
                    gimple_cond_make_false (as_a <gcond *> (stmt));
                  else
                    gcc_unreachable ();
                  gimple_set_modified (stmt, true);
                  update_stmt (stmt);
                  cfg_altered = true;
                  return taken_edge;
                }
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (si, m_const_and_copies,
                                        m_avail_exprs_stack);
      stmt = gsi_stmt (*si);

      /* Perform simple redundant store elimination. */
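      /* For illustration (not from this file), in

           int k (int *p) { int x = *p; *p = x; return x; }

         the store writes back the value just loaded from *p, so the
         lookup below finds the equivalent load and the store can be
         deleted. */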
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gassign *new_stmt;
          rhs = dom_valueize (rhs);
          /* Build a new statement with the RHS and LHS exchanged. */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          expr_hash_elt *elt = NULL;
          cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
                                                               false, &elt);
          if (cached_lhs
              && operand_equal_p (rhs, cached_lhs, 0)
              && refs_same_for_tbaa_p (elt->expr ()->kind == EXPR_SINGLE
                                       ? elt->expr ()->ops.single.rhs
                                       : NULL_TREE, lhs))
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              *removed_p = true;
              return retval;
            }
        }

      /* If this statement was not redundant, we may still be able to
         simplify it, which may in turn allow other parts of DOM or other
         passes to do a better job. */
      test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
    }

  /* Record any additional equivalences created by this statement. */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes. */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt),
                               gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
        {
          retval = find_taken_edge (bb, val);
          if (retval)
            {
              /* Fix the condition to be either true or false. */
              if (gimple_code (stmt) == GIMPLE_COND)
                {
                  if (integer_zerop (val))
                    gimple_cond_make_false (as_a <gcond *> (stmt));
                  else if (integer_onep (val))
                    gimple_cond_make_true (as_a <gcond *> (stmt));
                  else
                    gcc_unreachable ();

                  gimple_set_modified (stmt, true);
                }

              /* Further simplifications may be possible. */
              cfg_altered = true;
            }
        }

      update_stmt_if_modified (stmt);

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match. */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }

      if (!was_noreturn
          && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
        need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}
