1 /* Combining of if-expressions on trees.
2    Copyright (C) 2007-2021 Free Software Foundation, Inc.
3    Contributed by Richard Guenther <rguenther@suse.de>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "ssa.h"
33 #include "tree-pretty-print.h"
34 /* rtl is needed only because arm back-end requires it for
35    BRANCH_COST.  */
36 #include "fold-const.h"
37 #include "cfganal.h"
38 #include "gimple-fold.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa.h"
43 #include "attribs.h"
44 #include "asan.h"
45 
46 #ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
47 #define LOGICAL_OP_NON_SHORT_CIRCUIT \
48   (BRANCH_COST (optimize_function_for_speed_p (cfun), \
49                 false) >= 2)
50 #endif
51 
52 /* This pass combines COND_EXPRs to simplify control flow.  It
53    currently recognizes bit tests and comparisons in chains that
54    represent logical and or logical or of two COND_EXPRs.
55 
   It does so by walking basic blocks in an approximate reverse
57    post-dominator order and trying to match CFG patterns that
58    represent logical and or logical or of two COND_EXPRs.
59    Transformations are done if the COND_EXPR conditions match
60    either
61 
62      1. two single bit tests X & (1 << Yn) (for logical and)
63 
64      2. two bit tests X & Yn (for logical or)
65 
66      3. two comparisons X OPn Y (for logical or)
67 
68    To simplify this pass, removing basic blocks and dead code
69    is left to CFG cleanup and DCE.  */
70 
71 
/* Recognize an if-then-else CFG pattern starting to match with the
   COND_BB basic-block containing the COND_EXPR.  The recognized
   then and else blocks are stored to *THEN_BB and *ELSE_BB.  If
   *THEN_BB and/or *ELSE_BB are already set, they are required to
   match the then and else basic-blocks to make the pattern match.
   Returns true if the pattern matched, false otherwise.  */
78 
79 static bool
recognize_if_then_else(basic_block cond_bb,basic_block * then_bb,basic_block * else_bb)80 recognize_if_then_else (basic_block cond_bb,
81 			basic_block *then_bb, basic_block *else_bb)
82 {
83   edge t, e;
84 
85   if (EDGE_COUNT (cond_bb->succs) != 2)
86     return false;
87 
88   /* Find the then/else edges.  */
89   t = EDGE_SUCC (cond_bb, 0);
90   e = EDGE_SUCC (cond_bb, 1);
91   if (!(t->flags & EDGE_TRUE_VALUE))
92     std::swap (t, e);
93   if (!(t->flags & EDGE_TRUE_VALUE)
94       || !(e->flags & EDGE_FALSE_VALUE))
95     return false;
96 
97   /* Check if the edge destinations point to the required block.  */
98   if (*then_bb
99       && t->dest != *then_bb)
100     return false;
101   if (*else_bb
102       && e->dest != *else_bb)
103     return false;
104 
105   if (!*then_bb)
106     *then_bb = t->dest;
107   if (!*else_bb)
108     *else_bb = e->dest;
109 
110   return true;
111 }
112 
113 /* Verify if the basic block BB does not have side-effects.  Return
114    true in this case, else false.  */
115 
116 static bool
bb_no_side_effects_p(basic_block bb)117 bb_no_side_effects_p (basic_block bb)
118 {
119   gimple_stmt_iterator gsi;
120 
121   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
122     {
123       gimple *stmt = gsi_stmt (gsi);
124 
125       if (is_gimple_debug (stmt))
126 	continue;
127 
128       if (gimple_has_side_effects (stmt)
129 	  || gimple_uses_undefined_value_p (stmt)
130 	  || gimple_could_trap_p (stmt)
131 	  || gimple_vuse (stmt)
132 	  /* const calls don't match any of the above, yet they could
133 	     still have some side-effects - they could contain
134 	     gimple_could_trap_p statements, like floating point
135 	     exceptions or integer division by zero.  See PR70586.
136 	     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
137 	     should handle this.  */
138 	  || is_gimple_call (stmt))
139 	return false;
140     }
141 
142   return true;
143 }
144 
145 /* Return true if BB is an empty forwarder block to TO_BB.  */
146 
147 static bool
forwarder_block_to(basic_block bb,basic_block to_bb)148 forwarder_block_to (basic_block bb, basic_block to_bb)
149 {
150   return empty_block_p (bb)
151 	 && single_succ_p (bb)
152 	 && single_succ (bb) == to_bb;
153 }
154 
155 /* Verify if all PHI node arguments in DEST for edges from BB1 or
156    BB2 to DEST are the same.  This makes the CFG merge point
157    free from side-effects.  Return true in this case, else false.  */
158 
159 static bool
same_phi_args_p(basic_block bb1,basic_block bb2,basic_block dest)160 same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
161 {
162   edge e1 = find_edge (bb1, dest);
163   edge e2 = find_edge (bb2, dest);
164   gphi_iterator gsi;
165   gphi *phi;
166 
167   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
168     {
169       phi = gsi.phi ();
170       if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
171 			    PHI_ARG_DEF_FROM_EDGE (phi, e2), 0))
172         return false;
173     }
174 
175   return true;
176 }
177 
178 /* Return the best representative SSA name for CANDIDATE which is used
179    in a bit test.  */
180 
181 static tree
get_name_for_bit_test(tree candidate)182 get_name_for_bit_test (tree candidate)
183 {
184   /* Skip single-use names in favor of using the name from a
185      non-widening conversion definition.  */
186   if (TREE_CODE (candidate) == SSA_NAME
187       && has_single_use (candidate))
188     {
189       gimple *def_stmt = SSA_NAME_DEF_STMT (candidate);
190       if (is_gimple_assign (def_stmt)
191 	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
192 	{
193 	  if (TYPE_PRECISION (TREE_TYPE (candidate))
194 	      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
195 	    return gimple_assign_rhs1 (def_stmt);
196 	}
197     }
198 
199   return candidate;
200 }
201 
202 /* Recognize a single bit test pattern in GIMPLE_COND and its defining
203    statements.  Store the name being tested in *NAME and the bit
204    in *BIT.  The GIMPLE_COND computes *NAME & (1 << *BIT).
205    Returns true if the pattern matched, false otherwise.  */
206 
static bool
recognize_single_bit_test (gcond *cond, tree *name, tree *bit, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  The
     condition must be of the form NAME != 0 (or NAME == 0 if INV)
     with NAME an SSA name.  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt))
    return false;

  /* Look at which bit is tested.  One form to recognize is
     D.1985_5 = state_3(D) >> control1_4(D);
     D.1986_6 = (int) D.1985_5;
     D.1987_7 = op0 & 1;
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && integer_onep (gimple_assign_rhs2 (stmt))
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      tree orig_name = gimple_assign_rhs1 (stmt);

      /* Look through copies and conversions to eventually
	 find the stmt that computes the shift.  Only non-widening
	 conversions are looked through so the tested bit is not
	 affected by truncation.  */
      stmt = SSA_NAME_DEF_STMT (orig_name);

      while (is_gimple_assign (stmt)
	     && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
		  && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
		      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))
		  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
		 || gimple_assign_ssa_name_copy_p (stmt)))
	stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

      /* If we found such, decompose it.  */
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
	{
	  /* op0 & (1 << op1) */
	  *bit = gimple_assign_rhs2 (stmt);
	  *name = gimple_assign_rhs1 (stmt);
	}
      else
	{
	  /* t & 1 */
	  *bit = integer_zero_node;
	  *name = get_name_for_bit_test (orig_name);
	}

      return true;
    }

  /* Another form is
     D.1987_7 = op0 & (1 << CST)
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && integer_pow2p (gimple_assign_rhs2 (stmt)))
    {
      *name = gimple_assign_rhs1 (stmt);
      /* Recover the bit position from the power-of-two mask.  */
      *bit = build_int_cst (integer_type_node,
			    tree_log2 (gimple_assign_rhs2 (stmt)));
      return true;
    }

  /* Another form is
     D.1986_6 = 1 << control1_4(D)
     D.1987_7 = op0 & D.1986_6
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    {
      gimple *tmp;

      /* Both arguments of the BIT_AND_EXPR can be the single-bit
	 specifying expression.  */
      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs2 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}

      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs1 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}
    }

  return false;
}
310 
311 /* Recognize a bit test pattern in a GIMPLE_COND and its defining
312    statements.  Store the name being tested in *NAME and the bits
313    in *BITS.  The COND_EXPR computes *NAME & *BITS.
314    Returns true if the pattern matched, false otherwise.  */
315 
316 static bool
recognize_bits_test(gcond * cond,tree * name,tree * bits,bool inv)317 recognize_bits_test (gcond *cond, tree *name, tree *bits, bool inv)
318 {
319   gimple *stmt;
320 
321   /* Get at the definition of the result of the bit test.  */
322   if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
323       || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
324       || !integer_zerop (gimple_cond_rhs (cond)))
325     return false;
326   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
327   if (!is_gimple_assign (stmt)
328       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
329     return false;
330 
331   *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt));
332   *bits = gimple_assign_rhs2 (stmt);
333 
334   return true;
335 }
336 
337 
338 /* Update profile after code in outer_cond_bb was adjusted so
339    outer_cond_bb has no condition.  */
340 
static void
update_profile_after_ifcombine (basic_block inner_cond_bb,
			        basic_block outer_cond_bb)
{
  /* Edge from the (now unconditional) outer block into the inner
     condition block ...  */
  edge outer_to_inner = find_edge (outer_cond_bb, inner_cond_bb);
  /* ... and the other outgoing edge of the outer block.  */
  edge outer2 = (EDGE_SUCC (outer_cond_bb, 0) == outer_to_inner
		 ? EDGE_SUCC (outer_cond_bb, 1)
		 : EDGE_SUCC (outer_cond_bb, 0));
  edge inner_taken = EDGE_SUCC (inner_cond_bb, 0);
  edge inner_not_taken = EDGE_SUCC (inner_cond_bb, 1);

  /* Canonicalize so inner_taken is the inner edge that reaches the
     same destination as outer2.  */
  if (inner_taken->dest != outer2->dest)
    std::swap (inner_taken, inner_not_taken);
  gcc_assert (inner_taken->dest == outer2->dest);

  /* In the following we assume that inner_cond_bb has single predecessor.  */
  gcc_assert (single_pred_p (inner_cond_bb));

  /* Path outer_cond_bb->(outer2) needs to be merged into path
     outer_cond_bb->(outer_to_inner)->inner_cond_bb->(inner_taken)
     and probability of inner_not_taken updated.  */

  inner_cond_bb->count = outer_cond_bb->count;

  /* Handle special case where inner_taken probability is always. In this case
     we know that the overall outcome will be always as well, but combining
     probabilities will be conservative because it does not know that
     outer2->probability is inverse of outer_to_inner->probability.  */
  if (inner_taken->probability == profile_probability::always ())
    ;
  else
    inner_taken->probability = outer2->probability + outer_to_inner->probability
			       * inner_taken->probability;
  inner_not_taken->probability = profile_probability::always ()
				 - inner_taken->probability;

  /* The outer block now unconditionally transfers to the inner
     one; its other edge becomes dead.  */
  outer_to_inner->probability = profile_probability::always ();
  outer2->probability = profile_probability::never ();
}
380 
381 /* If-convert on a and pattern with a common else block.  The inner
382    if is specified by its INNER_COND_BB, the outer by OUTER_COND_BB.
383    inner_inv, outer_inv and result_inv indicate whether the conditions
384    are inverted.
385    Returns true if the edges to the common else basic-block were merged.  */
386 
static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple *inner_stmt, *outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  /* Both blocks must end in a GIMPLE_COND to be combinable.  */
  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  Build (1 << bit1) | (1 << bit2) and gimplify it
	 before the inner condition.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		        build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      /* Test name & T == T (or != T when the result is inverted).  */
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
      && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  Because
	 recognize_bits_test does not distinguish name and mask
	 operands, try all four pairings.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  std::swap (name2, bits2);
	  std::swap (name1, bits1);
	}
      else if (name1 == bits2)
	std::swap (name2, bits2);
      else if (bits1 == name2)
	std::swap (name1, bits1);
      else
	return false;

      /* As we strip non-widening conversions in finding a common
         name that is tested make sure to end up with an integral
	 type for building the bit operations.  Convert everything
	 to the unsigned variant of the wider of the two types.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  Build name & (bits1 | bits2) and gimplify it before
	 the inner condition.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  Inversion
	 can fail for floating-point comparisons when NaNs are
	 honored.  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (boolean_type_node, inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond))))
	{
	  /* Folding failed; fall back to emitting an explicit
	     TRUTH_AND of both comparisons, but only when the target
	     prefers non-short-circuit logical ops and coverage
	     sanitizing does not depend on the branch structure.  */
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
	  if (param_logical_op_non_short_circuit != -1)
	    logical_op_non_short_circuit
	      = param_logical_op_non_short_circuit;
	  if (!logical_op_non_short_circuit || sanitize_coverage_p ())
	    return false;
	  /* Only do this optimization if the inner bb contains only the conditional. */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond),
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  /* Apply the inversion here so it is not applied a second
	     time below.  */
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
					  GSI_SAME_STMT);
        }
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      /* Re-gimplify if the folded condition is not directly usable
	 in a GIMPLE_COND.  */
      if (!is_gimple_condexpr_for_cond (t))
	{
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
630 
631 /* Helper function for tree_ssa_ifcombine_bb.  Recognize a CFG pattern and
632    dispatch to the appropriate if-conversion helper for a particular
633    set of INNER_COND_BB, OUTER_COND_BB, THEN_BB and ELSE_BB.
634    PHI_PRED_BB should be one of INNER_COND_BB, THEN_BB or ELSE_BB.  */
635 
static bool
tree_ssa_ifcombine_bb_1 (basic_block inner_cond_bb, basic_block outer_cond_bb,
			 basic_block then_bb, basic_block else_bb,
			 basic_block phi_pred_bb)
{
  /* Note that recognize_if_then_else both checks the given blocks
     against OUTER_COND_BB's edges and fills in blocks passed as
     NULL, so the order of the four attempts below matters.  */

  /* The && form is characterized by a common else_bb with
     the two edges leading to it mergable.  The latter is
     guaranteed by matching PHI arguments in the else_bb and
     the inner cond_bb having no side-effects.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto else_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	     ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, false,
				false);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &else_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto else_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	     ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, true,
				false);
    }

  /* The || form is characterized by a common then_bb with the
     two edges leading to it mergable.  The latter is guaranteed
     by matching PHI arguments in the then_bb and the inner cond_bb
     having no side-effects.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto then_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, true,
				true);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &then_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto then_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, false,
				true);
    }

  return false;
}
719 
720 /* Recognize a CFG pattern and dispatch to the appropriate
721    if-conversion helper.  We start with BB as the innermost
722    worker basic-block.  Returns true if a transformation was done.  */
723 
static bool
tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
{
  basic_block then_bb = NULL, else_bb = NULL;

  /* INNER_COND_BB itself must be an if-then-else; this also fills in
     its then/else destinations.  */
  if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
    return false;

  /* Recognize && and || of two conditions with a common
     then/else block which entry edges we can merge.  That is:
       if (a || b)
	 ;
     and
       if (a && b)
	 ;
     This requires a single predecessor of the inner cond_bb.  */
  if (single_pred_p (inner_cond_bb)
      && bb_no_side_effects_p (inner_cond_bb))
    {
      basic_block outer_cond_bb = single_pred (inner_cond_bb);

      if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
				   then_bb, else_bb, inner_cond_bb))
	return true;

      if (forwarder_block_to (else_bb, then_bb))
	{
	  /* Other possibilities for the && form, if else_bb is
	     empty forwarder block to then_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, else_bb))
	    return true;
	}
      else if (forwarder_block_to (then_bb, else_bb))
	{
	  /* Other possibilities for the || form, if then_bb is
	     empty forwarder block to else_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, then_bb))
	    return true;
	}
    }

  return false;
}
777 
778 /* Main entry for the tree if-conversion pass.  */
779 
780 namespace {
781 
/* Pass metadata registered with the pass manager.  */

const pass_data pass_data_tree_ifcombine =
{
  GIMPLE_PASS, /* type */
  "ifcombine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_IFCOMBINE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
794 
/* GIMPLE pass wrapper for the ifcombine transformation.  */

class pass_tree_ifcombine : public gimple_opt_pass
{
public:
  pass_tree_ifcombine (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_ifcombine, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_tree_ifcombine
806 
/* Run ifcombine on function FUN.  Returns TODO_cleanup_cfg when any
   conditions were combined so that the now-redundant blocks and
   edges get cleaned up, zero otherwise.  */

unsigned int
pass_tree_ifcombine::execute (function *fun)
{
  basic_block *bbs;
  bool cfg_changed = false;
  int i;

  bbs = single_pred_before_succ_order ();
  calculate_dominance_info (CDI_DOMINATORS);

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed after the predecessor.
     This ensures that we collapse outer ifs before visiting the
     inner ones, and also that we do not try to visit a removed
     block.  This is opposite of PHI-OPT, because we cascade the
     combining rather than cascading PHIs. */
  for (i = n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
    {
      basic_block bb = bbs[i];
      gimple *stmt = last_stmt (bb);

      if (stmt
	  && gimple_code (stmt) == GIMPLE_COND)
	if (tree_ssa_ifcombine_bb (bb))
	  {
	    /* Clear range info from all stmts in BB which is now executed
	       conditional on an always true/false condition.  */
	    reset_flow_sensitive_info_in_bb (bb);
	    cfg_changed |= true;
	  }
    }

  free (bbs);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
845 
846 } // anon namespace
847 
/* Factory function called by the pass manager to instantiate the
   ifcombine pass.  */

gimple_opt_pass *
make_pass_tree_ifcombine (gcc::context *ctxt)
{
  return new pass_tree_ifcombine (ctxt);
}
853