1 /* If-conversion for vectorizer.
2    Copyright (C) 2004-2020 Free Software Foundation, Inc.
3    Contributed by Devang Patel <dpatel@apple.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* This pass implements a tree level if-conversion of loops.  Its
22    initial goal is to help the vectorizer to vectorize loops with
23    conditions.
24 
25    A short description of if-conversion:
26 
27      o Decide if a loop is if-convertible or not.
     o Walk all loop basic blocks in breadth-first (BFS) order.
       o Remove conditional statements (at the end of a basic block)
         and propagate the condition into the destination basic blocks'
         predicate lists.
       o Replace each modify expression with a conditional modify
         expression using the current basic block's condition.
     o Merge all basic blocks.
       o Replace phi nodes with conditional modify expressions.
       o Merge all basic blocks into the header.
37 
38      Sample transformation:
39 
40      INPUT
41      -----
42 
43      # i_23 = PHI <0(0), i_18(10)>;
44      <L0>:;
45      j_15 = A[i_23];
46      if (j_15 > 41) goto <L1>; else goto <L17>;
47 
48      <L17>:;
49      goto <bb 3> (<L3>);
50 
51      <L1>:;
52 
53      # iftmp.2_4 = PHI <0(8), 42(2)>;
54      <L3>:;
55      A[i_23] = iftmp.2_4;
56      i_18 = i_23 + 1;
57      if (i_18 <= 15) goto <L19>; else goto <L18>;
58 
59      <L19>:;
60      goto <bb 1> (<L0>);
61 
62      <L18>:;
63 
64      OUTPUT
65      ------
66 
67      # i_23 = PHI <0(0), i_18(10)>;
68      <L0>:;
69      j_15 = A[i_23];
70 
71      <L3>:;
72      iftmp.2_4 = j_15 > 41 ? 42 : 0;
73      A[i_23] = iftmp.2_4;
74      i_18 = i_23 + 1;
75      if (i_18 <= 15) goto <L19>; else goto <L18>;
76 
77      <L19>:;
78      goto <bb 1> (<L0>);
79 
80      <L18>:;
81 */
82 
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "cfganal.h"
118 #include "internal-fn.h"
119 #include "fold-const.h"
120 #include "tree-ssa-sccvn.h"
121 #include "tree-cfgcleanup.h"
122 #include "tree-ssa-dse.h"
123 
/* Only handle PHIs with no more than MAX_PHI_ARG_NUM arguments, unless we
   are asked to by the simd pragma.  */
126 #define MAX_PHI_ARG_NUM \
127   ((unsigned) param_max_tree_if_conversion_phi_args)
128 
129 /* True if we've converted a statement that was only executed when some
130    condition C was true, and if for correctness we need to predicate the
131    statement to ensure that it is a no-op when C is false.  See
132    predicate_statements for the kinds of predication we support.  */
133 static bool need_to_predicate;
134 
/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  A complicated PHI has more than two arguments and can't
   be degenerated to a two-argument PHI.  See more information in the
   comment before phi_convertible_by_degenerating_args.  */
139 static bool any_complicated_phi;
140 
/* Hash for struct innermost_loop_behavior.  It is the user's
   responsibility to free the memory.  */
143 
144 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
145 {
146   static inline hashval_t hash (const value_type &);
147   static inline bool equal (const value_type &,
148 			    const compare_type &);
149 };
150 
151 inline hashval_t
152 innermost_loop_behavior_hash::hash (const value_type &e)
153 {
154   hashval_t hash;
155 
156   hash = iterative_hash_expr (e->base_address, 0);
157   hash = iterative_hash_expr (e->offset, hash);
158   hash = iterative_hash_expr (e->init, hash);
159   return iterative_hash_expr (e->step, hash);
160 }
161 
162 inline bool
163 innermost_loop_behavior_hash::equal (const value_type &e1,
164 				     const compare_type &e2)
165 {
166   if ((e1->base_address && !e2->base_address)
167       || (!e1->base_address && e2->base_address)
168       || (!e1->offset && e2->offset)
169       || (e1->offset && !e2->offset)
170       || (!e1->init && e2->init)
171       || (e1->init && !e2->init)
172       || (!e1->step && e2->step)
173       || (e1->step && !e2->step))
174     return false;
175 
176   if (e1->base_address && e2->base_address
177       && !operand_equal_p (e1->base_address, e2->base_address, 0))
178     return false;
179   if (e1->offset && e2->offset
180       && !operand_equal_p (e1->offset, e2->offset, 0))
181     return false;
182   if (e1->init && e2->init
183       && !operand_equal_p (e1->init, e2->init, 0))
184     return false;
185   if (e1->step && e2->step
186       && !operand_equal_p (e1->step, e2->step, 0))
187     return false;
188 
189   return true;
190 }
191 
192 /* List of basic blocks in if-conversion-suitable order.  */
193 static basic_block *ifc_bbs;
194 
195 /* Hash table to store <DR's innermost loop behavior, DR> pairs.  */
196 static hash_map<innermost_loop_behavior_hash,
197 		data_reference_p> *innermost_DR_map;
198 
199 /* Hash table to store <base reference, DR> pairs.  */
200 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
201 
202 /* List of redundant SSA names: the first should be replaced by the second.  */
203 static vec< std::pair<tree, tree> > redundant_ssa_names;
204 
205 /* Structure used to predicate basic blocks.  This is attached to the
206    ->aux field of the BBs in the loop to be if-converted.  */
207 struct bb_predicate {
208 
209   /* The condition under which this basic block is executed.  */
210   tree predicate;
211 
212   /* PREDICATE is gimplified, and the sequence of statements is
213      recorded here, in order to avoid the duplication of computations
214      that occur in previous conditions.  See PR44483.  */
215   gimple_seq predicate_gimplified_stmts;
216 };
217 
218 /* Returns true when the basic block BB has a predicate.  */
219 
220 static inline bool
221 bb_has_predicate (basic_block bb)
222 {
223   return bb->aux != NULL;
224 }
225 
226 /* Returns the gimplified predicate for basic block BB.  */
227 
228 static inline tree
229 bb_predicate (basic_block bb)
230 {
231   return ((struct bb_predicate *) bb->aux)->predicate;
232 }
233 
234 /* Sets the gimplified predicate COND for basic block BB.  */
235 
236 static inline void
237 set_bb_predicate (basic_block bb, tree cond)
238 {
239   gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
240 	       && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
241 	      || is_gimple_condexpr (cond));
242   ((struct bb_predicate *) bb->aux)->predicate = cond;
243 }
244 
245 /* Returns the sequence of statements of the gimplification of the
246    predicate for basic block BB.  */
247 
248 static inline gimple_seq
249 bb_predicate_gimplified_stmts (basic_block bb)
250 {
251   return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
252 }
253 
254 /* Sets the sequence of statements STMTS of the gimplification of the
255    predicate for basic block BB.  */
256 
257 static inline void
258 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
259 {
260   ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
261 }
262 
263 /* Adds the sequence of statements STMTS to the sequence of statements
264    of the predicate for basic block BB.  */
265 
266 static inline void
267 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
268 {
269   /* We might have updated some stmts in STMTS via force_gimple_operand
270      calling fold_stmt and that producing multiple stmts.  Delink immediate
271      uses so update_ssa after loop versioning doesn't get confused for
272      the not yet inserted predicates.
273      ???  This should go away once we reliably avoid updating stmts
274      not in any BB.  */
275   for (gimple_stmt_iterator gsi = gsi_start (stmts);
276        !gsi_end_p (gsi); gsi_next (&gsi))
277     {
278       gimple *stmt = gsi_stmt (gsi);
279       delink_stmt_imm_use (stmt);
280       gimple_set_modified (stmt, true);
281     }
282   gimple_seq_add_seq_without_update
283     (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
284 }
285 
286 /* Initializes to TRUE the predicate of basic block BB.  */
287 
288 static inline void
289 init_bb_predicate (basic_block bb)
290 {
291   bb->aux = XNEW (struct bb_predicate);
292   set_bb_predicate_gimplified_stmts (bb, NULL);
293   set_bb_predicate (bb, boolean_true_node);
294 }
295 
296 /* Release the SSA_NAMEs associated with the predicate of basic block BB.  */
297 
298 static inline void
299 release_bb_predicate (basic_block bb)
300 {
301   gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
302   if (stmts)
303     {
304       /* Ensure that these stmts haven't yet been added to a bb.  */
305       if (flag_checking)
306 	for (gimple_stmt_iterator i = gsi_start (stmts);
307 	     !gsi_end_p (i); gsi_next (&i))
308 	  gcc_assert (! gimple_bb (gsi_stmt (i)));
309 
310       /* Discard them.  */
311       gimple_seq_discard (stmts);
312       set_bb_predicate_gimplified_stmts (bb, NULL);
313     }
314 }
315 
316 /* Free the predicate of basic block BB.  */
317 
318 static inline void
319 free_bb_predicate (basic_block bb)
320 {
321   if (!bb_has_predicate (bb))
322     return;
323 
324   release_bb_predicate (bb);
325   free (bb->aux);
326   bb->aux = NULL;
327 }
328 
329 /* Reinitialize predicate of BB with the true predicate.  */
330 
331 static inline void
332 reset_bb_predicate (basic_block bb)
333 {
334   if (!bb_has_predicate (bb))
335     init_bb_predicate (bb);
336   else
337     {
338       release_bb_predicate (bb);
339       set_bb_predicate (bb, boolean_true_node);
340     }
341 }
342 
343 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
344    the expression EXPR.  Inserts the statement created for this
345    computation before GSI and leaves the iterator GSI at the same
346    statement.  */
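
/* For illustration (hypothetical operands): given EXPR a_2 + 1, this
   emits something like

   | _ifc_7 = a_2 + 1;

   before GSI and returns the newly created "_ifc_"-prefixed name.  */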
347 
348 static tree
349 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
350 {
351   tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
352   gimple *stmt = gimple_build_assign (new_name, expr);
353   gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
354   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
355   return new_name;
356 }
357 
358 /* Return true when COND is a false predicate.  */
359 
360 static inline bool
361 is_false_predicate (tree cond)
362 {
363   return (cond != NULL_TREE
364 	  && (cond == boolean_false_node
365 	      || integer_zerop (cond)));
366 }
367 
368 /* Return true when COND is a true predicate.  */
369 
370 static inline bool
371 is_true_predicate (tree cond)
372 {
373   return (cond == NULL_TREE
374 	  || cond == boolean_true_node
375 	  || integer_onep (cond));
376 }
377 
/* Returns true when BB has a predicate that is not trivially true
   (i.e. not boolean_true_node or NULL_TREE).  */
380 
381 static inline bool
382 is_predicated (basic_block bb)
383 {
384   return !is_true_predicate (bb_predicate (bb));
385 }
386 
387 /* Parses the predicate COND and returns its comparison code and
388    operands OP0 and OP1.  */
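
/* For example (hypothetical GIMPLE): if COND is the SSA name _5 and

   | _5 = a_2 < b_3;

   then parse_predicate returns LT_EXPR with *OP0 = a_2 and *OP1 = b_3.
   A TRUTH_NOT_EXPR definition of COND yields the inverted code,
   here GE_EXPR, instead.  */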
389 
390 static enum tree_code
391 parse_predicate (tree cond, tree *op0, tree *op1)
392 {
393   gimple *s;
394 
395   if (TREE_CODE (cond) == SSA_NAME
396       && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
397     {
398       if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
399 	{
400 	  *op0 = gimple_assign_rhs1 (s);
401 	  *op1 = gimple_assign_rhs2 (s);
402 	  return gimple_assign_rhs_code (s);
403 	}
404 
405       else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
406 	{
407 	  tree op = gimple_assign_rhs1 (s);
408 	  tree type = TREE_TYPE (op);
409 	  enum tree_code code = parse_predicate (op, op0, op1);
410 
411 	  return code == ERROR_MARK ? ERROR_MARK
412 	    : invert_tree_comparison (code, HONOR_NANS (type));
413 	}
414 
415       return ERROR_MARK;
416     }
417 
418   if (COMPARISON_CLASS_P (cond))
419     {
420       *op0 = TREE_OPERAND (cond, 0);
421       *op1 = TREE_OPERAND (cond, 1);
422       return TREE_CODE (cond);
423     }
424 
425   return ERROR_MARK;
426 }
427 
428 /* Returns the fold of predicate C1 OR C2 at location LOC.  */
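
/* For instance, OR-ing the predicates a_1 < 0 and a_1 >= 0 (hypothetical
   operands) may fold to boolean_true_node via maybe_fold_or_comparisons;
   when no folding is possible, a TRUTH_OR_EXPR of C1 and C2 is built.  */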
429 
430 static tree
431 fold_or_predicates (location_t loc, tree c1, tree c2)
432 {
433   tree op1a, op1b, op2a, op2b;
434   enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
435   enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
436 
437   if (code1 != ERROR_MARK && code2 != ERROR_MARK)
438     {
439       tree t = maybe_fold_or_comparisons (boolean_type_node, code1, op1a, op1b,
440 					  code2, op2a, op2b);
441       if (t)
442 	return t;
443     }
444 
445   return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
446 }
447 
448 /* Returns either a COND_EXPR or the folded expression if the folded
449    expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
   a constant or an SSA_NAME.  */
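
/* For instance, with hypothetical operands, folding

   | x_1 > y_2 ? x_1 : y_2

   can yield MAX_EXPR <x_1, y_2> (for suitable integer operands), which is
   then returned instead of a COND_EXPR.  */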
451 
452 static tree
453 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
454 {
455   tree rhs1, lhs1, cond_expr;
456 
  /* If COND is the comparison r != 0 and r has boolean type, reduce COND
     to the SSA_NAME r so that it is accepted by the vect bool pattern.  */
459   if (TREE_CODE (cond) == NE_EXPR)
460     {
461       tree op0 = TREE_OPERAND (cond, 0);
462       tree op1 = TREE_OPERAND (cond, 1);
463       if (TREE_CODE (op0) == SSA_NAME
464 	  && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
465 	  && (integer_zerop (op1)))
466 	cond = op0;
467     }
468   cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
469 
470   if (cond_expr == NULL_TREE)
471     return build3 (COND_EXPR, type, cond, rhs, lhs);
472 
473   STRIP_USELESS_TYPE_CONVERSION (cond_expr);
474 
475   if (is_gimple_val (cond_expr))
476     return cond_expr;
477 
478   if (TREE_CODE (cond_expr) == ABS_EXPR)
479     {
480       rhs1 = TREE_OPERAND (cond_expr, 1);
481       STRIP_USELESS_TYPE_CONVERSION (rhs1);
482       if (is_gimple_val (rhs1))
483 	return build1 (ABS_EXPR, type, rhs1);
484     }
485 
486   if (TREE_CODE (cond_expr) == MIN_EXPR
487       || TREE_CODE (cond_expr) == MAX_EXPR)
488     {
489       lhs1 = TREE_OPERAND (cond_expr, 0);
490       STRIP_USELESS_TYPE_CONVERSION (lhs1);
491       rhs1 = TREE_OPERAND (cond_expr, 1);
492       STRIP_USELESS_TYPE_CONVERSION (rhs1);
493       if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
494 	return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
495     }
496   return build3 (COND_EXPR, type, cond, rhs, lhs);
497 }
498 
499 /* Add condition NC to the predicate list of basic block BB.  LOOP is
500    the loop to be if-converted. Use predicate of cd-equivalent block
501    for join bb if it exists: we call basic blocks bb1 and bb2
502    cd-equivalent if they are executed under the same condition.  */
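
/* As an example (hypothetical predicates): if BB is already predicated by
   a_1 != 0 and NC is a_1 == 0, the two predicates OR up to true, so BB's
   predicate is simply reset to the true predicate.  */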
503 
504 static inline void
505 add_to_predicate_list (class loop *loop, basic_block bb, tree nc)
506 {
507   tree bc, *tp;
508   basic_block dom_bb;
509 
510   if (is_true_predicate (nc))
511     return;
512 
513   /* If dominance tells us this basic block is always executed,
514      don't record any predicates for it.  */
515   if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
516     return;
517 
518   dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
  /* We use the notion of cd-equivalence to get a simpler predicate for
     the join block, e.g. if the join block has 2 predecessors with
     predicates p1 & p2 and p1 & !p2, we'd like to get p1 for it instead
     of p1 & p2 | p1 & !p2.  */
523   if (dom_bb != loop->header
524       && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
525     {
526       gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
527       bc = bb_predicate (dom_bb);
528       if (!is_true_predicate (bc))
529 	set_bb_predicate (bb, bc);
530       else
531 	gcc_assert (is_true_predicate (bb_predicate (bb)));
532       if (dump_file && (dump_flags & TDF_DETAILS))
533 	fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
534 		 dom_bb->index, bb->index);
535       return;
536     }
537 
538   if (!is_predicated (bb))
539     bc = nc;
540   else
541     {
542       bc = bb_predicate (bb);
543       bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
544       if (is_true_predicate (bc))
545 	{
546 	  reset_bb_predicate (bb);
547 	  return;
548 	}
549     }
550 
551   /* Allow a TRUTH_NOT_EXPR around the main predicate.  */
552   if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
553     tp = &TREE_OPERAND (bc, 0);
554   else
555     tp = &bc;
556   if (!is_gimple_condexpr (*tp))
557     {
558       gimple_seq stmts;
559       *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
560       add_bb_predicate_gimplified_stmts (bb, stmts);
561     }
562   set_bb_predicate (bb, bc);
563 }
564 
565 /* Add the condition COND to the previous condition PREV_COND, and add
566    this to the predicate list of the destination of edge E.  LOOP is
567    the loop to be if-converted.  */
568 
569 static void
570 add_to_dst_predicate_list (class loop *loop, edge e,
571 			   tree prev_cond, tree cond)
572 {
573   if (!flow_bb_inside_loop_p (loop, e->dest))
574     return;
575 
576   if (!is_true_predicate (prev_cond))
577     cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
578 			prev_cond, cond);
579 
580   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
581     add_to_predicate_list (loop, e->dest, cond);
582 }
583 
584 /* Return true if one of the successor edges of BB exits LOOP.  */
585 
586 static bool
587 bb_with_exit_edge_p (class loop *loop, basic_block bb)
588 {
589   edge e;
590   edge_iterator ei;
591 
592   FOR_EACH_EDGE (e, ei, bb->succs)
593     if (loop_exit_edge_p (loop, e))
594       return true;
595 
596   return false;
597 }
598 
/* Given PHI which has more than two arguments, this function checks if
   it's if-convertible by degenerating its arguments.  Specifically, this
   is the case if the following two conditions are satisfied:

     1) The number of distinct PHI argument values equals 2 and one of the
	arguments occurs only once.
     2) The edge corresponding to the unique argument isn't a critical edge.

   Such a PHI can be handled as if it had only two arguments.  For example,
608    below PHI:
609 
610      res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
611 
612    can be transformed into:
613 
614      res = (predicate of e3) ? A_2 : A_1;
615 
616    Return TRUE if it is the case, FALSE otherwise.  */
617 
618 static bool
619 phi_convertible_by_degenerating_args (gphi *phi)
620 {
621   edge e;
622   tree arg, t1 = NULL, t2 = NULL;
623   unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
624   unsigned int num_args = gimple_phi_num_args (phi);
625 
626   gcc_assert (num_args > 2);
627 
628   for (i = 0; i < num_args; i++)
629     {
630       arg = gimple_phi_arg_def (phi, i);
631       if (t1 == NULL || operand_equal_p (t1, arg, 0))
632 	{
633 	  n1++;
634 	  i1 = i;
635 	  t1 = arg;
636 	}
637       else if (t2 == NULL || operand_equal_p (t2, arg, 0))
638 	{
639 	  n2++;
640 	  i2 = i;
641 	  t2 = arg;
642 	}
643       else
644 	return false;
645     }
646 
647   if (n1 != 1 && n2 != 1)
648     return false;
649 
650   /* Check if the edge corresponding to the unique arg is critical.  */
651   e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
652   if (EDGE_COUNT (e->src->succs) > 1)
653     return false;
654 
655   return true;
656 }
657 
/* Return true when PHI is if-convertible.  PHI is part of loop LOOP
   and it belongs to basic block BB.  Note that at this point it is
   already known that PHI is if-convertible.  This function updates the
   global variable ANY_COMPLICATED_PHI if PHI is complicated.  */
662 
663 static bool
664 if_convertible_phi_p (class loop *loop, basic_block bb, gphi *phi)
665 {
666   if (dump_file && (dump_flags & TDF_DETAILS))
667     {
668       fprintf (dump_file, "-------------------------\n");
669       print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
670     }
671 
672   if (bb != loop->header
673       && gimple_phi_num_args (phi) > 2
674       && !phi_convertible_by_degenerating_args (phi))
675     any_complicated_phi = true;
676 
677   return true;
678 }
679 
680 /* Records the status of a data reference.  This struct is attached to
681    each DR->aux field.  */
682 
683 struct ifc_dr {
684   bool rw_unconditionally;
685   bool w_unconditionally;
686   bool written_at_least_once;
687 
688   tree rw_predicate;
689   tree w_predicate;
690   tree base_w_predicate;
691 };
692 
693 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
694 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
695 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
696 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
697 
/* Iterates over DRs and stores <ref, DR> and <base ref, DR> pairs in
   HASH tables.  While storing them in the HASH tables, it checks if the
   reference is unconditionally read or written and stores that as flag
   information.  For a base reference it checks if it is written at least
   once unconditionally and stores that as flag information along with
   the DR.  In other words, for every data reference A in STMT there
   exist other accesses to a data reference with the same base with
   predicates that add up (OR-up) to the true predicate: this ensures
   that the data reference A is touched (read or written) on every
   iteration of the if-converted loop.  */
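
/* For example, two hypothetical accesses to A[i_1] under the predicates
   c_2 and !c_2 share the same innermost behavior; their predicates OR up
   to true, so the recorded DR is marked DR_RW_UNCONDITIONALLY (and
   DR_W_UNCONDITIONALLY if both are stores).  */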
708 static void
709 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
710 {
711 
712   data_reference_p *master_dr, *base_master_dr;
713   tree base_ref = DR_BASE_OBJECT (a);
714   innermost_loop_behavior *innermost = &DR_INNERMOST (a);
715   tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
716   bool exist1, exist2;
717 
718   master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
719   if (!exist1)
720     *master_dr = a;
721 
722   if (DR_IS_WRITE (a))
723     {
724       IFC_DR (*master_dr)->w_predicate
725 	= fold_or_predicates (UNKNOWN_LOCATION, ca,
726 			      IFC_DR (*master_dr)->w_predicate);
727       if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
728 	DR_W_UNCONDITIONALLY (*master_dr) = true;
729     }
730   IFC_DR (*master_dr)->rw_predicate
731     = fold_or_predicates (UNKNOWN_LOCATION, ca,
732 			  IFC_DR (*master_dr)->rw_predicate);
733   if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
734     DR_RW_UNCONDITIONALLY (*master_dr) = true;
735 
736   if (DR_IS_WRITE (a))
737     {
738       base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
739       if (!exist2)
740 	*base_master_dr = a;
741       IFC_DR (*base_master_dr)->base_w_predicate
742 	= fold_or_predicates (UNKNOWN_LOCATION, ca,
743 			      IFC_DR (*base_master_dr)->base_w_predicate);
744       if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
745 	DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
746     }
747 }
748 
/* Return TRUE if we can prove that the index IDX of an array reference
   REF is within the array bounds.  Return FALSE otherwise.  */
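
/* For instance, for a hypothetical reference A[i_1] where A is declared
   as int A[16], i_1 evolves as {0, +, 1} in LOOP and the maximum number
   of latch iterations is 15, the index never exceeds 15, so the function
   returns true.  */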
751 
752 static bool
753 idx_within_array_bound (tree ref, tree *idx, void *dta)
754 {
755   wi::overflow_type overflow;
756   widest_int niter, valid_niter, delta, wi_step;
757   tree ev, init, step;
758   tree low, high;
759   class loop *loop = (class loop*) dta;
760 
761   /* Only support within-bound access for array references.  */
762   if (TREE_CODE (ref) != ARRAY_REF)
763     return false;
764 
765   /* For arrays at the end of the structure, we are not guaranteed that they
766      do not really extend over their declared size.  However, for arrays of
767      size greater than one, this is unlikely to be intended.  */
768   if (array_at_struct_end_p (ref))
769     return false;
770 
771   ev = analyze_scalar_evolution (loop, *idx);
772   ev = instantiate_parameters (loop, ev);
773   init = initial_condition (ev);
774   step = evolution_part_in_loop_num (ev, loop->num);
775 
776   if (!init || TREE_CODE (init) != INTEGER_CST
777       || (step && TREE_CODE (step) != INTEGER_CST))
778     return false;
779 
780   low = array_ref_low_bound (ref);
781   high = array_ref_up_bound (ref);
782 
783   /* The case of nonconstant bounds could be handled, but it would be
784      complicated.  */
785   if (TREE_CODE (low) != INTEGER_CST
786       || !high || TREE_CODE (high) != INTEGER_CST)
787     return false;
788 
  /* Check if the initial idx is within bound.  */
790   if (wi::to_widest (init) < wi::to_widest (low)
791       || wi::to_widest (init) > wi::to_widest (high))
792     return false;
793 
794   /* The idx is always within bound.  */
795   if (!step || integer_zerop (step))
796     return true;
797 
798   if (!max_loop_iterations (loop, &niter))
799     return false;
800 
801   if (wi::to_widest (step) < 0)
802     {
803       delta = wi::to_widest (init) - wi::to_widest (low);
804       wi_step = -wi::to_widest (step);
805     }
806   else
807     {
808       delta = wi::to_widest (high) - wi::to_widest (init);
809       wi_step = wi::to_widest (step);
810     }
811 
812   valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
813   /* The iteration space of idx is within array bound.  */
814   if (!overflow && niter <= valid_niter)
815     return true;
816 
817   return false;
818 }
819 
/* Return TRUE if REF is a within-bound array reference.  */
821 
822 static bool
823 ref_within_array_bound (gimple *stmt, tree ref)
824 {
825   class loop *loop = loop_containing_stmt (stmt);
826 
827   gcc_assert (loop != NULL);
828   return for_each_index (&ref, idx_within_array_bound, loop);
829 }
830 
831 
/* Given a memory reference expression REF, return TRUE if the base
   object it refers to is writable.  The base object of a memory
   reference is the main object being referenced, which is returned by
   function get_base_address.  */
836 
837 static bool
838 base_object_writable (tree ref)
839 {
840   tree base_tree = get_base_address (ref);
841 
842   return (base_tree
843 	  && DECL_P (base_tree)
844 	  && decl_binds_to_current_def_p (base_tree)
845 	  && !TREE_READONLY (base_tree));
846 }
847 
848 /* Return true when the memory references of STMT won't trap in the
849    if-converted code.  There are two things that we have to check for:
850 
851    - writes to memory occur to writable memory: if-conversion of
852    memory writes transforms the conditional memory writes into
853    unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
854    into "A[i] = cond ? foo : A[i]", and as the write to memory may not
855    be executed at all in the original code, it may be a readonly
856    memory.  To check that A is not const-qualified, we check that
857    there exists at least an unconditional write to A in the current
858    function.
859 
860    - reads or writes to memory are valid memory accesses for every
861    iteration.  To check that the memory accesses are correctly formed
862    and that we are allowed to read and write in these locations, we
863    check that the memory accesses to be if-converted occur at every
864    iteration unconditionally.
865 
   Returns true for the memory reference in STMT if the same memory
   reference is read or written unconditionally at least once and the
   base memory reference is written unconditionally once.  This checks
   that the reference cannot cause a write fault.  It also returns true
   if the memory reference is unconditionally read at least once and we
   are conditionally writing to memory that is known to be both read and
   written, and is bound to the definition we are seeing.  */
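
/* For example, in a hypothetical loop body

   | ... = A[i_1];
   | if (c_2) A[i_1] = x_3;

   the conditional store shares its innermost behavior with the
   unconditional read, so it is treated as not trapping; whether it may
   actually be if-converted then also depends on the base object being
   writable and on -fstore-data-races.  */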
873 static bool
874 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
875 {
876   /* If DR didn't see a reference here we can't use it to tell
877      whether the ref traps or not.  */
878   if (gimple_uid (stmt) == 0)
879     return false;
880 
881   data_reference_p *master_dr, *base_master_dr;
882   data_reference_p a = drs[gimple_uid (stmt) - 1];
883 
884   tree base = DR_BASE_OBJECT (a);
885   innermost_loop_behavior *innermost = &DR_INNERMOST (a);
886 
887   gcc_assert (DR_STMT (a) == stmt);
888   gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
889               || DR_INIT (a) || DR_STEP (a));
890 
891   master_dr = innermost_DR_map->get (innermost);
892   gcc_assert (master_dr != NULL);
893 
894   base_master_dr = baseref_DR_map->get (base);
895 
896   /* If a is unconditionally written to it doesn't trap.  */
897   if (DR_W_UNCONDITIONALLY (*master_dr))
898     return true;
899 
  /* If a is unconditionally accessed then ...

     Even if a is accessed conditionally, we can treat it as an
     unconditional access if it's an array reference and all its indices
     are within the array bounds.  */
905   if (DR_RW_UNCONDITIONALLY (*master_dr)
906       || ref_within_array_bound (stmt, DR_REF (a)))
907     {
      /* An unconditional read won't trap.  */
909       if (DR_IS_READ (a))
910 	return true;
911 
      /* An unconditional write won't trap if the base is written
         to unconditionally.  */
914       if (base_master_dr
915 	  && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
916 	return flag_store_data_races;
917       /* or the base is known to be not readonly.  */
918       else if (base_object_writable (DR_REF (a)))
919 	return flag_store_data_races;
920     }
921 
922   return false;
923 }
924 
925 /* Return true if STMT could be converted into a masked load or store
926    (conditional load or store based on a mask computed from bb predicate).  */
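
/* For example, a hypothetical conditional store

   | if (c_2) A[i_1] = x_3;

   can later be predicated as a masked store along the lines of

   | .MASK_STORE (&A[i_1], align, _mask, x_3);

   when the target supports masked stores for the access mode.  */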
927 
928 static bool
929 ifcvt_can_use_mask_load_store (gimple *stmt)
930 {
931   /* Check whether this is a load or store.  */
932   tree lhs = gimple_assign_lhs (stmt);
933   bool is_load;
934   tree ref;
935   if (gimple_store_p (stmt))
936     {
937       if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
938 	return false;
939       is_load = false;
940       ref = lhs;
941     }
942   else if (gimple_assign_load_p (stmt))
943     {
944       is_load = true;
945       ref = gimple_assign_rhs1 (stmt);
946     }
947   else
948     return false;
949 
950   if (may_be_nonaddressable_p (ref))
951     return false;
952 
  /* The mask should have an integer mode of the same size as the
     load/store mode.  */
955   machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
956   if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
957     return false;
958 
959   if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
960     return true;
961 
962   return false;
963 }
964 
965 /* Return true if STMT could be converted from an operation that is
966    unconditional to one that is conditional on a bb predicate mask.  */
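
/* For example, a hypothetical trapping division

   | _3 = a_1 / b_2;

   executed only under the predicate mask _m can be rewritten using a
   conditional internal function, roughly

   | _3 = .COND_DIV (_m, a_1, b_2, <fallback>);

   provided the target supports the vectorized form of that function.  */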
967 
968 static bool
969 ifcvt_can_predicate (gimple *stmt)
970 {
971   basic_block bb = gimple_bb (stmt);
972 
973   if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
974       || bb->loop_father->dont_vectorize
975       || gimple_has_volatile_ops (stmt))
976     return false;
977 
978   if (gimple_assign_single_p (stmt))
979     return ifcvt_can_use_mask_load_store (stmt);
980 
981   tree_code code = gimple_assign_rhs_code (stmt);
982   tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt));
983   tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
984   if (!types_compatible_p (lhs_type, rhs_type))
985     return false;
986   internal_fn cond_fn = get_conditional_internal_fn (code);
987   return (cond_fn != IFN_LAST
988 	  && vectorized_internal_fn_supported_p (cond_fn, lhs_type));
989 }
990 
991 /* Return true when STMT is if-convertible.
992 
   A GIMPLE_ASSIGN statement is not if-convertible if:
   - it is not movable,
   - it could trap,
   - its LHS is not of a register type.  */
997 
998 static bool
999 if_convertible_gimple_assign_stmt_p (gimple *stmt,
1000 				     vec<data_reference_p> refs)
1001 {
1002   tree lhs = gimple_assign_lhs (stmt);
1003 
1004   if (dump_file && (dump_flags & TDF_DETAILS))
1005     {
1006       fprintf (dump_file, "-------------------------\n");
1007       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1008     }
1009 
1010   if (!is_gimple_reg_type (TREE_TYPE (lhs)))
1011     return false;
1012 
  /* Some of these constraints might be too conservative.  */
1014   if (stmt_ends_bb_p (stmt)
1015       || gimple_has_volatile_ops (stmt)
1016       || (TREE_CODE (lhs) == SSA_NAME
1017           && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
1018       || gimple_has_side_effects (stmt))
1019     {
1020       if (dump_file && (dump_flags & TDF_DETAILS))
1021         fprintf (dump_file, "stmt not suitable for ifcvt\n");
1022       return false;
1023     }
1024 
1025   /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
1026      in between if_convertible_loop_p and combine_blocks
1027      we can perform loop versioning.  */
1028   gimple_set_plf (stmt, GF_PLF_2, false);
1029 
1030   if ((! gimple_vuse (stmt)
1031        || gimple_could_trap_p_1 (stmt, false, false)
1032        || ! ifcvt_memrefs_wont_trap (stmt, refs))
1033       && gimple_could_trap_p (stmt))
1034     {
1035       if (ifcvt_can_predicate (stmt))
1036 	{
1037 	  gimple_set_plf (stmt, GF_PLF_2, true);
1038 	  need_to_predicate = true;
1039 	  return true;
1040 	}
1041       if (dump_file && (dump_flags & TDF_DETAILS))
1042 	fprintf (dump_file, "tree could trap...\n");
1043       return false;
1044     }
1045 
  /* When if-converting stores, force versioning; likewise if we
     ended up generating store data races.  */
1048   if (gimple_vdef (stmt))
1049     need_to_predicate = true;
1050 
1051   return true;
1052 }
1053 
1054 /* Return true when STMT is if-convertible.
1055 
   A statement is if-convertible if:
   - it is an if-convertible GIMPLE_ASSIGN,
   - it is a GIMPLE_LABEL, a GIMPLE_DEBUG or a GIMPLE_COND,
   - it is a call to a built-in function.  */
1060 
1061 static bool
1062 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
1063 {
1064   switch (gimple_code (stmt))
1065     {
1066     case GIMPLE_LABEL:
1067     case GIMPLE_DEBUG:
1068     case GIMPLE_COND:
1069       return true;
1070 
1071     case GIMPLE_ASSIGN:
1072       return if_convertible_gimple_assign_stmt_p (stmt, refs);
1073 
1074     case GIMPLE_CALL:
1075       {
1076 	tree fndecl = gimple_call_fndecl (stmt);
1077 	if (fndecl)
1078 	  {
1079 	    int flags = gimple_call_flags (stmt);
1080 	    if ((flags & ECF_CONST)
1081 		&& !(flags & ECF_LOOPING_CONST_OR_PURE)
1082 		/* We can only vectorize some builtins at the moment,
1083 		   so restrict if-conversion to those.  */
1084 		&& fndecl_built_in_p (fndecl))
1085 	      return true;
1086 	  }
1087 	return false;
1088       }
1089 
1090     default:
1091       /* Don't know what to do with 'em so don't do anything.  */
1092       if (dump_file && (dump_flags & TDF_DETAILS))
1093 	{
1094 	  fprintf (dump_file, "don't know what to do\n");
1095 	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1096 	}
1097       return false;
1098     }
1099 
1100   return true;
1101 }
1102 
/* Assumes that BB has more than one predecessor.
   Returns true if all incoming edges of BB are critical, and false
   if at least one of them is not.  */
1106 
1107 static inline bool
1108 all_preds_critical_p (basic_block bb)
1109 {
1110   edge e;
1111   edge_iterator ei;
1112 
1113   FOR_EACH_EDGE (e, ei, bb->preds)
1114     if (EDGE_COUNT (e->src->succs) == 1)
1115       return false;
1116   return true;
1117 }
1118 
1119 /* Return true when BB is if-convertible.  This routine does not check
1120    basic block's statements and phis.
1121 
1122    A basic block is not if-convertible if:
1123    - it is non-empty and it is after the exit block (in BFS order),
1124    - it is after the exit block but before the latch,
1125    - its edges are not normal.
1126 
1127    EXIT_BB is the basic block containing the exit of the LOOP.  BB is
1128    inside LOOP.  */
1129 
1130 static bool
1131 if_convertible_bb_p (class loop *loop, basic_block bb, basic_block exit_bb)
1132 {
1133   edge e;
1134   edge_iterator ei;
1135 
1136   if (dump_file && (dump_flags & TDF_DETAILS))
1137     fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1138 
1139   if (EDGE_COUNT (bb->succs) > 2)
1140     return false;
1141 
1142   gimple *last = last_stmt (bb);
1143   if (gcall *call = safe_dyn_cast <gcall *> (last))
1144     if (gimple_call_ctrl_altering_p (call))
1145       return false;
1146 
1147   if (exit_bb)
1148     {
1149       if (bb != loop->latch)
1150 	{
1151 	  if (dump_file && (dump_flags & TDF_DETAILS))
1152 	    fprintf (dump_file, "basic block after exit bb but before latch\n");
1153 	  return false;
1154 	}
1155       else if (!empty_block_p (bb))
1156 	{
1157 	  if (dump_file && (dump_flags & TDF_DETAILS))
1158 	    fprintf (dump_file, "non empty basic block after exit bb\n");
1159 	  return false;
1160 	}
1161       else if (bb == loop->latch
1162 	       && bb != exit_bb
1163 	       && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1164 	  {
1165 	    if (dump_file && (dump_flags & TDF_DETAILS))
1166 	      fprintf (dump_file, "latch is not dominated by exit_block\n");
1167 	    return false;
1168 	  }
1169     }
1170 
1171   /* Be less adventurous and handle only normal edges.  */
1172   FOR_EACH_EDGE (e, ei, bb->succs)
1173     if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1174       {
1175 	if (dump_file && (dump_flags & TDF_DETAILS))
1176 	  fprintf (dump_file, "Difficult to handle edges\n");
1177 	return false;
1178       }
1179 
1180   return true;
1181 }
1182 
1183 /* Return true when all predecessor blocks of BB are visited.  The
1184    VISITED bitmap keeps track of the visited blocks.  */
1185 
1186 static bool
1187 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1188 {
1189   edge e;
1190   edge_iterator ei;
1191   FOR_EACH_EDGE (e, ei, bb->preds)
1192     if (!bitmap_bit_p (*visited, e->src->index))
1193       return false;
1194 
1195   return true;
1196 }
1197 
/* Get the body of a LOOP in an order suitable for if-conversion.  It is
   the caller's responsibility to deallocate the basic block list.
   An if-conversion-suitable order is breadth-first (BFS) order with an
   additional constraint: select a block only if all its
   predecessors are already selected.  */
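
/* For a hypothetical diamond-shaped loop body

   | header -> bb_then, bb_else;  bb_then -> join;  bb_else -> join;
   | join -> latch

   a valid order is header, bb_then, bb_else, join, latch: JOIN is only
   selected once both of its predecessors have been selected.  */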
1203 
1204 static basic_block *
1205 get_loop_body_in_if_conv_order (const class loop *loop)
1206 {
1207   basic_block *blocks, *blocks_in_bfs_order;
1208   basic_block bb;
1209   bitmap visited;
1210   unsigned int index = 0;
1211   unsigned int visited_count = 0;
1212 
1213   gcc_assert (loop->num_nodes);
1214   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1215 
1216   blocks = XCNEWVEC (basic_block, loop->num_nodes);
1217   visited = BITMAP_ALLOC (NULL);
1218 
1219   blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1220 
1221   index = 0;
1222   while (index < loop->num_nodes)
1223     {
1224       bb = blocks_in_bfs_order [index];
1225 
1226       if (bb->flags & BB_IRREDUCIBLE_LOOP)
1227 	{
1228 	  free (blocks_in_bfs_order);
1229 	  BITMAP_FREE (visited);
1230 	  free (blocks);
1231 	  return NULL;
1232 	}
1233 
1234       if (!bitmap_bit_p (visited, bb->index))
1235 	{
1236 	  if (pred_blocks_visited_p (bb, &visited)
1237 	      || bb == loop->header)
1238 	    {
1239 	      /* This block is now visited.  */
1240 	      bitmap_set_bit (visited, bb->index);
1241 	      blocks[visited_count++] = bb;
1242 	    }
1243 	}
1244 
1245       index++;
1246 
1247       if (index == loop->num_nodes
1248 	  && visited_count != loop->num_nodes)
1249 	/* Not done yet.  */
1250 	index = 0;
1251     }
1252   free (blocks_in_bfs_order);
1253   BITMAP_FREE (visited);
1254   return blocks;
1255 }
1256 
/* Computes the predicates for all the basic blocks in LOOP.
1259 
1260    predicate_bbs first allocates the predicates of the basic blocks.
1261    These fields are then initialized with the tree expressions
1262    representing the predicates under which a basic block is executed
1263    in the LOOP.  As the loop->header is executed at each iteration, it
1264    has the "true" predicate.  Other statements executed under a
1265    condition are predicated with that condition, for example
1266 
1267    | if (x)
1268    |   S1;
1269    | else
1270    |   S2;
1271 
1272    S1 will be predicated with "x", and
1273    S2 will be predicated with "!x".  */
1274 
1275 static void
1276 predicate_bbs (loop_p loop)
1277 {
1278   unsigned int i;
1279 
1280   for (i = 0; i < loop->num_nodes; i++)
1281     init_bb_predicate (ifc_bbs[i]);
1282 
1283   for (i = 0; i < loop->num_nodes; i++)
1284     {
1285       basic_block bb = ifc_bbs[i];
1286       tree cond;
1287       gimple *stmt;
1288 
1289       /* The loop latch and loop exit block are always executed and
1290 	 have no extra conditions to be processed: skip them.  */
1291       if (bb == loop->latch
1292 	  || bb_with_exit_edge_p (loop, bb))
1293 	{
1294 	  reset_bb_predicate (bb);
1295 	  continue;
1296 	}
1297 
1298       cond = bb_predicate (bb);
1299       stmt = last_stmt (bb);
1300       if (stmt && gimple_code (stmt) == GIMPLE_COND)
1301 	{
1302 	  tree c2;
1303 	  edge true_edge, false_edge;
1304 	  location_t loc = gimple_location (stmt);
1305 	  tree c = build2_loc (loc, gimple_cond_code (stmt),
1306 				    boolean_type_node,
1307 				    gimple_cond_lhs (stmt),
1308 				    gimple_cond_rhs (stmt));
1309 
1310 	  /* Add new condition into destination's predicate list.  */
1311 	  extract_true_false_edges_from_block (gimple_bb (stmt),
1312 					       &true_edge, &false_edge);
1313 
1314 	  /* If C is true, then TRUE_EDGE is taken.  */
1315 	  add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1316 				     unshare_expr (c));
1317 
1318 	  /* If C is false, then FALSE_EDGE is taken.  */
1319 	  c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1320 			   unshare_expr (c));
1321 	  add_to_dst_predicate_list (loop, false_edge,
1322 				     unshare_expr (cond), c2);
1323 
1324 	  cond = NULL_TREE;
1325 	}
1326 
1327       /* If current bb has only one successor, then consider it as an
1328 	 unconditional goto.  */
1329       if (single_succ_p (bb))
1330 	{
1331 	  basic_block bb_n = single_succ (bb);
1332 
1333 	  /* The successor bb inherits the predicate of its
1334 	     predecessor.  If there is no predicate in the predecessor
1335 	     bb, then consider the successor bb as always executed.  */
1336 	  if (cond == NULL_TREE)
1337 	    cond = boolean_true_node;
1338 
1339 	  add_to_predicate_list (loop, bb_n, cond);
1340 	}
1341     }
1342 
1343   /* The loop header is always executed.  */
1344   reset_bb_predicate (loop->header);
1345   gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1346 	      && bb_predicate_gimplified_stmts (loop->latch) == NULL);
1347 }
1348 
1349 /* Build region by adding loop pre-header and post-header blocks.  */
1350 
1351 static vec<basic_block>
1352 build_region (class loop *loop)
1353 {
1354   vec<basic_block> region = vNULL;
1355   basic_block exit_bb = NULL;
1356 
1357   gcc_assert (ifc_bbs);
1358   /* The first element is loop pre-header.  */
1359   region.safe_push (loop_preheader_edge (loop)->src);
1360 
1361   for (unsigned int i = 0; i < loop->num_nodes; i++)
1362     {
1363       basic_block bb = ifc_bbs[i];
1364       region.safe_push (bb);
1365       /* Find loop postheader.  */
1366       edge e;
1367       edge_iterator ei;
1368       FOR_EACH_EDGE (e, ei, bb->succs)
1369 	if (loop_exit_edge_p (loop, e))
1370 	  {
1371 	      exit_bb = e->dest;
1372 	      break;
1373 	  }
1374     }
1375   /* The last element is loop post-header.  */
1376   gcc_assert (exit_bb);
1377   region.safe_push (exit_bb);
1378   return region;
1379 }
1380 
/* Return true when LOOP is if-convertible.  This is a helper function
   for if_convertible_loop_p.  REFS is initialized and freed in
   if_convertible_loop_p.  */
1384 
1385 static bool
1386 if_convertible_loop_p_1 (class loop *loop, vec<data_reference_p> *refs)
1387 {
1388   unsigned int i;
1389   basic_block exit_bb = NULL;
1390   vec<basic_block> region;
1391 
1392   if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1393     return false;
1394 
1395   calculate_dominance_info (CDI_DOMINATORS);
1396 
1397   /* Allow statements that can be handled during if-conversion.  */
1398   ifc_bbs = get_loop_body_in_if_conv_order (loop);
1399   if (!ifc_bbs)
1400     {
1401       if (dump_file && (dump_flags & TDF_DETAILS))
1402 	fprintf (dump_file, "Irreducible loop\n");
1403       return false;
1404     }
1405 
1406   for (i = 0; i < loop->num_nodes; i++)
1407     {
1408       basic_block bb = ifc_bbs[i];
1409 
1410       if (!if_convertible_bb_p (loop, bb, exit_bb))
1411 	return false;
1412 
1413       if (bb_with_exit_edge_p (loop, bb))
1414 	exit_bb = bb;
1415     }
1416 
1417   for (i = 0; i < loop->num_nodes; i++)
1418     {
1419       basic_block bb = ifc_bbs[i];
1420       gimple_stmt_iterator gsi;
1421 
1422       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1423 	switch (gimple_code (gsi_stmt (gsi)))
1424 	  {
1425 	  case GIMPLE_LABEL:
1426 	  case GIMPLE_ASSIGN:
1427 	  case GIMPLE_CALL:
1428 	  case GIMPLE_DEBUG:
1429 	  case GIMPLE_COND:
1430 	    gimple_set_uid (gsi_stmt (gsi), 0);
1431 	    break;
1432 	  default:
1433 	    return false;
1434 	  }
1435     }
1436 
1437   data_reference_p dr;
1438 
1439   innermost_DR_map
1440 	  = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1441   baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1442 
1443   /* Compute post-dominator tree locally.  */
1444   region = build_region (loop);
1445   calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);
1446 
1447   predicate_bbs (loop);
1448 
1449   /* Free post-dominator tree since it is not used after predication.  */
1450   free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
1451   region.release ();
1452 
1453   for (i = 0; refs->iterate (i, &dr); i++)
1454     {
1455       tree ref = DR_REF (dr);
1456 
1457       dr->aux = XNEW (struct ifc_dr);
1458       DR_BASE_W_UNCONDITIONALLY (dr) = false;
1459       DR_RW_UNCONDITIONALLY (dr) = false;
1460       DR_W_UNCONDITIONALLY (dr) = false;
1461       IFC_DR (dr)->rw_predicate = boolean_false_node;
1462       IFC_DR (dr)->w_predicate = boolean_false_node;
1463       IFC_DR (dr)->base_w_predicate = boolean_false_node;
1464       if (gimple_uid (DR_STMT (dr)) == 0)
1465 	gimple_set_uid (DR_STMT (dr), i + 1);
1466 
1467       /* If DR doesn't have innermost loop behavior or it's a compound
1468          memory reference, we synthesize its innermost loop behavior
1469          for hashing.  */
1470       if (TREE_CODE (ref) == COMPONENT_REF
1471           || TREE_CODE (ref) == IMAGPART_EXPR
1472           || TREE_CODE (ref) == REALPART_EXPR
1473           || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1474 	       || DR_INIT (dr) || DR_STEP (dr)))
1475         {
1476           while (TREE_CODE (ref) == COMPONENT_REF
1477 	         || TREE_CODE (ref) == IMAGPART_EXPR
1478 	         || TREE_CODE (ref) == REALPART_EXPR)
1479 	    ref = TREE_OPERAND (ref, 0);
1480 
1481 	  memset (&DR_INNERMOST (dr), 0, sizeof (DR_INNERMOST (dr)));
1482 	  DR_BASE_ADDRESS (dr) = ref;
1483         }
1484       hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1485     }
1486 
1487   for (i = 0; i < loop->num_nodes; i++)
1488     {
1489       basic_block bb = ifc_bbs[i];
1490       gimple_stmt_iterator itr;
1491 
1492       /* Check the if-convertibility of statements in predicated BBs.  */
1493       if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1494 	for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1495 	  if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
1496 	    return false;
1497     }
1498 
  /* Checking PHIs needs to be done after stmts, as whether there are any
     masked loads or stores affects the tests.  */
1501   for (i = 0; i < loop->num_nodes; i++)
1502     {
1503       basic_block bb = ifc_bbs[i];
1504       gphi_iterator itr;
1505 
1506       for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1507 	if (!if_convertible_phi_p (loop, bb, itr.phi ()))
1508 	  return false;
1509     }
1510 
1511   if (dump_file)
1512     fprintf (dump_file, "Applying if-conversion\n");
1513 
1514   return true;
1515 }
1516 
1517 /* Return true when LOOP is if-convertible.
1518    LOOP is if-convertible if:
1519    - it is innermost,
   - it has more than two basic blocks,
   - it has only one exit,
   - none of the loop header's successor edges is an exit edge,
   - its basic blocks and phi nodes are if-convertible.  */
1524 
1525 static bool
1526 if_convertible_loop_p (class loop *loop)
1527 {
1528   edge e;
1529   edge_iterator ei;
1530   bool res = false;
1531   vec<data_reference_p> refs;
1532 
1533   /* Handle only innermost loop.  */
1534   if (!loop || loop->inner)
1535     {
1536       if (dump_file && (dump_flags & TDF_DETAILS))
1537 	fprintf (dump_file, "not innermost loop\n");
1538       return false;
1539     }
1540 
1541   /* If only one block, no need for if-conversion.  */
1542   if (loop->num_nodes <= 2)
1543     {
1544       if (dump_file && (dump_flags & TDF_DETAILS))
1545 	fprintf (dump_file, "less than 2 basic blocks\n");
1546       return false;
1547     }
1548 
1549   /* More than one loop exit is too much to handle.  */
1550   if (!single_exit (loop))
1551     {
1552       if (dump_file && (dump_flags & TDF_DETAILS))
1553 	fprintf (dump_file, "multiple exits\n");
1554       return false;
1555     }
1556 
1557   /* If one of the loop header's edge is an exit edge then do not
1558      apply if-conversion.  */
1559   FOR_EACH_EDGE (e, ei, loop->header->succs)
1560     if (loop_exit_edge_p (loop, e))
1561       return false;
1562 
1563   refs.create (5);
1564   res = if_convertible_loop_p_1 (loop, &refs);
1565 
1566   data_reference_p dr;
1567   unsigned int i;
1568   for (i = 0; refs.iterate (i, &dr); i++)
1569     free (dr->aux);
1570 
1571   free_data_refs (refs);
1572 
1573   delete innermost_DR_map;
1574   innermost_DR_map = NULL;
1575 
1576   delete baseref_DR_map;
1577   baseref_DR_map = NULL;
1578 
1579   return res;
1580 }
1581 
/* Returns true if the def-stmt for a phi argument is a simple increment
   or decrement that is in a predicated basic block.
   In fact, the following PHI pattern is searched for:
1585       loop-header:
1586 	reduc_1 = PHI <..., reduc_2>
1587       ...
1588 	if (...)
1589 	  reduc_3 = ...
1590 	reduc_2 = PHI <reduc_1, reduc_3>
1591 
   ARG_0 and ARG_1 are the corresponding PHI arguments.
1593    REDUC, OP0 and OP1 contain reduction stmt and its operands.
1594    EXTENDED is true if PHI has > 2 arguments.  */
1595 
1596 static bool
1597 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1598 			  tree *op0, tree *op1, bool extended)
1599 {
1600   tree lhs, r_op1, r_op2;
1601   gimple *stmt;
1602   gimple *header_phi = NULL;
1603   enum tree_code reduction_op;
1604   basic_block bb = gimple_bb (phi);
1605   class loop *loop = bb->loop_father;
1606   edge latch_e = loop_latch_edge (loop);
1607   imm_use_iterator imm_iter;
1608   use_operand_p use_p;
1609   edge e;
1610   edge_iterator ei;
1611   bool result = false;
1612   if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1613     return false;
1614 
1615   if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1616     {
1617       lhs = arg_1;
1618       header_phi = SSA_NAME_DEF_STMT (arg_0);
1619       stmt = SSA_NAME_DEF_STMT (arg_1);
1620     }
1621   else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1622     {
1623       lhs = arg_0;
1624       header_phi = SSA_NAME_DEF_STMT (arg_1);
1625       stmt = SSA_NAME_DEF_STMT (arg_0);
1626     }
1627   else
1628     return false;
1629   if (gimple_bb (header_phi) != loop->header)
1630     return false;
1631 
1632   if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1633     return false;
1634 
1635   if (gimple_code (stmt) != GIMPLE_ASSIGN
1636       || gimple_has_volatile_ops (stmt))
1637     return false;
1638 
1639   if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1640     return false;
1641 
1642   if (!is_predicated (gimple_bb (stmt)))
1643     return false;
1644 
1645   /* Check that stmt-block is predecessor of phi-block.  */
1646   FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1647     if (e->dest == bb)
1648       {
1649 	result = true;
1650 	break;
1651       }
1652   if (!result)
1653     return false;
1654 
1655   if (!has_single_use (lhs))
1656     return false;
1657 
1658   reduction_op = gimple_assign_rhs_code (stmt);
1659   if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1660     return false;
1661   r_op1 = gimple_assign_rhs1 (stmt);
1662   r_op2 = gimple_assign_rhs2 (stmt);
1663 
  /* Make R_OP1 hold the reduction variable.  */
1665   if (r_op2 == PHI_RESULT (header_phi)
1666       && reduction_op == PLUS_EXPR)
1667     std::swap (r_op1, r_op2);
1668   else if (r_op1 != PHI_RESULT (header_phi))
1669     return false;
1670 
1671   /* Check that R_OP1 is used in reduction stmt or in PHI only.  */
1672   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1673     {
1674       gimple *use_stmt = USE_STMT (use_p);
1675       if (is_gimple_debug (use_stmt))
1676 	continue;
1677       if (use_stmt == stmt)
1678 	continue;
1679       if (gimple_code (use_stmt) != GIMPLE_PHI)
1680 	return false;
1681     }
1682 
1683   *op0 = r_op1; *op1 = r_op2;
1684   *reduc = stmt;
1685   return true;
1686 }
1687 
1688 /* Converts conditional scalar reduction into unconditional form, e.g.
1689      bb_4
1690        if (_5 != 0) goto bb_5 else goto bb_6
1691      end_bb_4
1692      bb_5
1693        res_6 = res_13 + 1;
1694      end_bb_5
1695      bb_6
1696        # res_2 = PHI <res_13(4), res_6(5)>
1697      end_bb_6
1698 
1699    will be converted into sequence
1700     _ifc__1 = _5 != 0 ? 1 : 0;
1701     res_2 = res_13 + _ifc__1;
  Argument SWAP tells whether the arguments of the conditional expression
  should be swapped.
  Returns the rhs of the resulting PHI assignment.  */
1705 
1706 static tree
1707 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1708 			       tree cond, tree op0, tree op1, bool swap)
1709 {
1710   gimple_stmt_iterator stmt_it;
1711   gimple *new_assign;
1712   tree rhs;
1713   tree rhs1 = gimple_assign_rhs1 (reduc);
1714   tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1715   tree c;
1716   tree zero = build_zero_cst (TREE_TYPE (rhs1));
1717 
1718   if (dump_file && (dump_flags & TDF_DETAILS))
1719     {
1720       fprintf (dump_file, "Found cond scalar reduction.\n");
1721       print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1722     }
1723 
1724   /* Build cond expression using COND and constant operand
1725      of reduction rhs.  */
1726   c = fold_build_cond_expr (TREE_TYPE (rhs1),
1727 			    unshare_expr (cond),
1728 			    swap ? zero : op1,
1729 			    swap ? op1 : zero);
1730 
1731   /* Create assignment stmt and insert it at GSI.  */
1732   new_assign = gimple_build_assign (tmp, c);
1733   gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1734   /* Build rhs for unconditional increment/decrement.  */
1735   rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1736 		     TREE_TYPE (rhs1), op0, tmp);
1737 
1738   /* Delete original reduction stmt.  */
1739   stmt_it = gsi_for_stmt (reduc);
1740   gsi_remove (&stmt_it, true);
1741   release_defs (reduc);
1742   return rhs;
1743 }
1744 
1745 /* Produce condition for all occurrences of ARG in PHI node.  */
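/* For example (a sketch): if ARG comes in on the PHI edges from bb_3 and
   bb_7, whose block predicates are _p_3 and _p_7, the returned condition is
   the gimplified form of _p_3 | _p_7 (or simply a true predicate if one of
   them is already true).  */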
1746 
1747 static tree
1748 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1749 		       gimple_stmt_iterator *gsi)
1750 {
1751   int len;
1752   int i;
1753   tree cond = NULL_TREE;
1754   tree c;
1755   edge e;
1756 
1757   len = occur->length ();
1758   gcc_assert (len > 0);
1759   for (i = 0; i < len; i++)
1760     {
1761       e = gimple_phi_arg_edge (phi, (*occur)[i]);
1762       c = bb_predicate (e->src);
1763       if (is_true_predicate (c))
1764 	{
1765 	  cond = c;
1766 	  break;
1767 	}
1768       c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1769 				      is_gimple_condexpr, NULL_TREE,
1770 				      true, GSI_SAME_STMT);
1771       if (cond != NULL_TREE)
1772 	{
1773 	  /* Must build OR expression.  */
1774 	  cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1775 	  cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1776 					     is_gimple_condexpr, NULL_TREE,
1777 					     true, GSI_SAME_STMT);
1778 	}
1779       else
1780 	cond = c;
1781     }
1782   gcc_assert (cond != NULL_TREE);
1783   return cond;
1784 }
1785 
1786 /* Local valueization callback that follows all-use SSA edges.  */
1787 
1788 static tree
1789 ifcvt_follow_ssa_use_edges (tree val)
1790 {
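  /* Returning VAL itself (rather than NULL_TREE) tells the folder that it
     may look through the defining statement of every SSA use.  */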
1791   return val;
1792 }
1793 
1794 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1795    This routine can handle PHI nodes with more than two arguments.
1796 
1797    For example,
1798      S1: A = PHI <x1(1), x2(5)>
1799    is converted into,
1800      S2: A = cond ? x1 : x2;
1801 
1802    The generated code is inserted at GSI that points to the top of
1803    basic block's statement list.
1804    If the PHI node has more than two arguments, a chain of conditional
1805    expressions is produced.  */
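/* For instance (a sketch with made-up names and edge numbers),
     A = PHI <x1(2), x2(4), x3(6)>
   with block predicates _p2, _p4 and _p6 on the respective incoming edges
   would become roughly
     _ifc_0 = _p2 ? x1 : x2;
     _ifc_1 = _p4 ? x2 : _ifc_0;
     A = _p6 ? x3 : _ifc_1;  */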
1806 
1807 
1808 static void
1809 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1810 {
1811   gimple *new_stmt = NULL, *reduc;
1812   tree rhs, res, arg0, arg1, op0, op1, scev;
1813   tree cond;
1814   unsigned int index0;
1815   unsigned int max, args_len;
1816   edge e;
1817   basic_block bb;
1818   unsigned int i;
1819 
1820   res = gimple_phi_result (phi);
1821   if (virtual_operand_p (res))
1822     return;
1823 
1824   if ((rhs = degenerate_phi_result (phi))
1825       || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1826 					    res))
1827 	  && !chrec_contains_undetermined (scev)
1828 	  && scev != res
1829 	  && (rhs = gimple_phi_arg_def (phi, 0))))
1830     {
1831       if (dump_file && (dump_flags & TDF_DETAILS))
1832 	{
1833 	  fprintf (dump_file, "Degenerate phi!\n");
1834 	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1835 	}
1836       new_stmt = gimple_build_assign (res, rhs);
1837       gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1838       update_stmt (new_stmt);
1839       return;
1840     }
1841 
1842   bb = gimple_bb (phi);
1843   if (EDGE_COUNT (bb->preds) == 2)
1844     {
1845       /* Predicate ordinary PHI node with 2 arguments.  */
1846       edge first_edge, second_edge;
1847       basic_block true_bb;
1848       first_edge = EDGE_PRED (bb, 0);
1849       second_edge = EDGE_PRED (bb, 1);
1850       cond = bb_predicate (first_edge->src);
1851       if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1852 	std::swap (first_edge, second_edge);
1853       if (EDGE_COUNT (first_edge->src->succs) > 1)
1854 	{
1855 	  cond = bb_predicate (second_edge->src);
1856 	  if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1857 	    cond = TREE_OPERAND (cond, 0);
1858 	  else
1859 	    first_edge = second_edge;
1860 	}
1861       else
1862 	cond = bb_predicate (first_edge->src);
1863       /* Gimplify the condition to a valid cond-expr conditional operand.  */
1864       cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1865 					 is_gimple_condexpr, NULL_TREE,
1866 					 true, GSI_SAME_STMT);
1867       true_bb = first_edge->src;
1868       if (EDGE_PRED (bb, 1)->src == true_bb)
1869 	{
1870 	  arg0 = gimple_phi_arg_def (phi, 1);
1871 	  arg1 = gimple_phi_arg_def (phi, 0);
1872 	}
1873       else
1874 	{
1875 	  arg0 = gimple_phi_arg_def (phi, 0);
1876 	  arg1 = gimple_phi_arg_def (phi, 1);
1877 	}
1878       if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1879 				    &op0, &op1, false))
1880 	/* Convert reduction stmt into vectorizable form.  */
1881 	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1882 					     true_bb != gimple_bb (reduc));
1883       else
1884 	/* Build new RHS using selected condition and arguments.  */
1885 	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1886 				    arg0, arg1);
1887       new_stmt = gimple_build_assign (res, rhs);
1888       gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1889       gimple_stmt_iterator new_gsi = gsi_for_stmt (new_stmt);
1890       if (fold_stmt (&new_gsi, ifcvt_follow_ssa_use_edges))
1891 	{
1892 	  new_stmt = gsi_stmt (new_gsi);
1893 	  update_stmt (new_stmt);
1894 	}
1895 
1896       if (dump_file && (dump_flags & TDF_DETAILS))
1897 	{
1898 	  fprintf (dump_file, "new phi replacement stmt\n");
1899 	  print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1900 	}
1901       return;
1902     }
1903 
1904   /* Create a hashmap for the PHI node that maps each argument value to the
1905      vector of argument indexes having that value.  */
1906   bool swap = false;
1907   hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1908   unsigned int num_args = gimple_phi_num_args (phi);
1909   int max_ind = -1;
1910   /* Vector of different PHI argument values.  */
1911   auto_vec<tree> args (num_args);
1912 
1913   /* Compute phi_arg_map.  */
1914   for (i = 0; i < num_args; i++)
1915     {
1916       tree arg;
1917 
1918       arg = gimple_phi_arg_def (phi, i);
1919       if (!phi_arg_map.get (arg))
1920 	args.quick_push (arg);
1921       phi_arg_map.get_or_insert (arg).safe_push (i);
1922     }
1923 
1924   /* Determine element with max number of occurrences.  */
1925   max_ind = -1;
1926   max = 1;
1927   args_len = args.length ();
1928   for (i = 0; i < args_len; i++)
1929     {
1930       unsigned int len;
1931       if ((len = phi_arg_map.get (args[i])->length ()) > max)
1932 	{
1933 	  max_ind = (int) i;
1934 	  max = len;
1935 	}
1936     }
1937 
1938   /* Move the element with the most occurrences to the end of ARGS.  */
1939   if (max_ind != -1 && max_ind + 1 != (int) args_len)
1940     std::swap (args[args_len - 1], args[max_ind]);
1941 
1942   /* Handle one special case when the number of arguments with different
1943      values is equal to 2 and one argument has only a single occurrence.
1944      Such a PHI can be handled as if it had only 2 arguments.  */
1945   if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1946     {
1947       vec<int> *indexes;
1948       indexes = phi_arg_map.get (args[0]);
1949       index0 = (*indexes)[0];
1950       arg0 = args[0];
1951       arg1 = args[1];
1952       e = gimple_phi_arg_edge (phi, index0);
1953       cond = bb_predicate (e->src);
1954       if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1955 	{
1956 	  swap = true;
1957 	  cond = TREE_OPERAND (cond, 0);
1958 	}
1959       /* Gimplify the condition to a valid cond-expr conditional operand.  */
1960       cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1961 					 is_gimple_condexpr, NULL_TREE,
1962 					 true, GSI_SAME_STMT);
1963       if (!(is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1964 				      &op0, &op1, true)))
1965 	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1966 				    swap ? arg1 : arg0,
1967 				    swap ? arg0 : arg1);
1968       else
1969 	/* Convert reduction stmt into vectorizable form.  */
1970 	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1971 					     swap);
1972       new_stmt = gimple_build_assign (res, rhs);
1973       gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1974       update_stmt (new_stmt);
1975     }
1976   else
1977     {
1978       /* Common case.  */
1979       vec<int> *indexes;
1980       tree type = TREE_TYPE (gimple_phi_result (phi));
1981       tree lhs;
1982       arg1 = args[1];
1983       for (i = 0; i < args_len; i++)
1984 	{
1985 	  arg0 = args[i];
1986 	  indexes = phi_arg_map.get (args[i]);
1987 	  if (i != args_len - 1)
1988 	    lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1989 	  else
1990 	    lhs = res;
1991 	  cond = gen_phi_arg_condition (phi, indexes, gsi);
1992 	  rhs = fold_build_cond_expr (type, unshare_expr (cond),
1993 				      arg0, arg1);
1994 	  new_stmt = gimple_build_assign (lhs, rhs);
1995 	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1996 	  update_stmt (new_stmt);
1997 	  arg1 = lhs;
1998 	}
1999     }
2000 
2001   if (dump_file && (dump_flags & TDF_DETAILS))
2002     {
2003       fprintf (dump_file, "new extended phi replacement stmt\n");
2004       print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
2005     }
2006 }
2007 
2008 /* Replaces in LOOP all the scalar phi nodes other than those in the
2009    LOOP->header block with conditional modify expressions.  */
2010 
2011 static void
2012 predicate_all_scalar_phis (class loop *loop)
2013 {
2014   basic_block bb;
2015   unsigned int orig_loop_num_nodes = loop->num_nodes;
2016   unsigned int i;
2017 
2018   for (i = 1; i < orig_loop_num_nodes; i++)
2019     {
2020       gphi *phi;
2021       gimple_stmt_iterator gsi;
2022       gphi_iterator phi_gsi;
2023       bb = ifc_bbs[i];
2024 
2025       if (bb == loop->header)
2026 	continue;
2027 
2028       phi_gsi = gsi_start_phis (bb);
2029       if (gsi_end_p (phi_gsi))
2030 	continue;
2031 
2032       gsi = gsi_after_labels (bb);
2033       while (!gsi_end_p (phi_gsi))
2034 	{
2035 	  phi = phi_gsi.phi ();
2036 	  if (virtual_operand_p (gimple_phi_result (phi)))
2037 	    gsi_next (&phi_gsi);
2038 	  else
2039 	    {
2040 	      predicate_scalar_phi (phi, &gsi);
2041 	      remove_phi_node (&phi_gsi, false);
2042 	    }
2043 	}
2044     }
2045 }
2046 
2047 /* Insert in each basic block of LOOP the statements produced by the
2048    gimplification of the predicates.  */
2049 
2050 static void
2051 insert_gimplified_predicates (loop_p loop)
2052 {
2053   unsigned int i;
2054 
2055   for (i = 0; i < loop->num_nodes; i++)
2056     {
2057       basic_block bb = ifc_bbs[i];
2058       gimple_seq stmts;
2059       if (!is_predicated (bb))
2060 	gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2061       if (!is_predicated (bb))
2062 	{
2063 	  /* Do not insert statements for a basic block that is not
2064 	     predicated.  Also make sure that the predicate of the
2065 	     basic block is set to true.  */
2066 	  reset_bb_predicate (bb);
2067 	  continue;
2068 	}
2069 
2070       stmts = bb_predicate_gimplified_stmts (bb);
2071       if (stmts)
2072 	{
2073 	  if (need_to_predicate)
2074 	    {
2075 	      /* Insert the predicate of the BB just after the label,
2076 		 as the if-conversion of memory writes will use this
2077 		 predicate.  */
2078 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
2079 	      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2080 	    }
2081 	  else
2082 	    {
2083 	      /* Insert the predicate of the BB at the end of the BB
2084 		 as this would reduce the register pressure: the only
2085 		 use of this predicate will be in successor BBs.  */
2086 	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
2087 
2088 	      if (gsi_end_p (gsi)
2089 		  || stmt_ends_bb_p (gsi_stmt (gsi)))
2090 		gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2091 	      else
2092 		gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2093 	    }
2094 
2095 	  /* Once the sequence is code generated, set it to NULL.  */
2096 	  set_bb_predicate_gimplified_stmts (bb, NULL);
2097 	}
2098     }
2099 }
2100 
2101 /* Helper function for predicate_statements.  Return the index of an
2102    existing mask created for the given SIZE, or -1 if there is none.  */
2103 
2104 static int
2105 mask_exists (int size, vec<int> vec)
2106 {
2107   unsigned int ix;
2108   int v;
2109   FOR_EACH_VEC_ELT (vec, ix, v)
2110     if (v == size)
2111       return (int) ix;
2112   return -1;
2113 }
2114 
2115 /* Helper function for predicate_statements.  STMT is a memory read or
2116    write and it needs to be predicated by MASK.  Return a statement
2117    that does so.  */
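/* For instance (a sketch), a predicated store  A[i_4] = x_5;  becomes roughly

     .MASK_STORE (&A[i_4], <alignment>, MASK, x_5);

   and a predicated load  x_5 = A[i_4];  becomes

     x_5 = .MASK_LOAD (&A[i_4], <alignment>, MASK);  */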
2118 
2119 static gimple *
2120 predicate_load_or_store (gimple_stmt_iterator *gsi, gassign *stmt, tree mask)
2121 {
2122   gcall *new_stmt;
2123 
2124   tree lhs = gimple_assign_lhs (stmt);
2125   tree rhs = gimple_assign_rhs1 (stmt);
2126   tree ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2127   mark_addressable (ref);
2128   tree addr = force_gimple_operand_gsi (gsi, build_fold_addr_expr (ref),
2129 					true, NULL_TREE, true, GSI_SAME_STMT);
2130   tree ptr = build_int_cst (reference_alias_ptr_type (ref),
2131 			    get_object_alignment (ref));
2132   /* Copy points-to info if possible.  */
2133   if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2134     copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2135 		   ref);
2136   if (TREE_CODE (lhs) == SSA_NAME)
2137     {
2138       new_stmt
2139 	= gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2140 				      ptr, mask);
2141       gimple_call_set_lhs (new_stmt, lhs);
2142       gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2143     }
2144   else
2145     {
2146       new_stmt
2147 	= gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2148 				      mask, rhs);
2149       gimple_move_vops (new_stmt, stmt);
2150     }
2151   gimple_call_set_nothrow (new_stmt, true);
2152   return new_stmt;
2153 }
2154 
2155 /* STMT uses OP_LHS.  Check whether it is equivalent to:
2156 
2157      ... = OP_MASK ? OP_LHS : X;
2158 
2159    Return X if so, otherwise return null.  OP_MASK is an SSA_NAME that is
2160    known to have value OP_COND.  */
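/* The inverted form (a sketch)

     ... = <inverse of OP_COND> ? X : OP_LHS;

   is recognized as well and likewise returns X.  */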
2161 
2162 static tree
2163 check_redundant_cond_expr (gimple *stmt, tree op_mask, tree op_cond,
2164 			   tree op_lhs)
2165 {
2166   gassign *assign = dyn_cast <gassign *> (stmt);
2167   if (!assign || gimple_assign_rhs_code (assign) != COND_EXPR)
2168     return NULL_TREE;
2169 
2170   tree use_cond = gimple_assign_rhs1 (assign);
2171   tree if_true = gimple_assign_rhs2 (assign);
2172   tree if_false = gimple_assign_rhs3 (assign);
2173 
2174   if ((use_cond == op_mask || operand_equal_p (use_cond, op_cond, 0))
2175       && if_true == op_lhs)
2176     return if_false;
2177 
2178   if (inverse_conditions_p (use_cond, op_cond) && if_false == op_lhs)
2179     return if_true;
2180 
2181   return NULL_TREE;
2182 }
2183 
2184 /* Return true if VALUE is available for use at STMT.  SSA_NAMES is
2185    the set of SSA names defined earlier in STMT's block.  */
2186 
2187 static bool
2188 value_available_p (gimple *stmt, hash_set<tree_ssa_name_hash> *ssa_names,
2189 		   tree value)
2190 {
2191   if (is_gimple_min_invariant (value))
2192     return true;
2193 
2194   if (TREE_CODE (value) == SSA_NAME)
2195     {
2196       if (SSA_NAME_IS_DEFAULT_DEF (value))
2197 	return true;
2198 
2199       basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (value));
2200       basic_block use_bb = gimple_bb (stmt);
2201       return (def_bb == use_bb
2202 	      ? ssa_names->contains (value)
2203 	      : dominated_by_p (CDI_DOMINATORS, use_bb, def_bb));
2204     }
2205 
2206   return false;
2207 }
2208 
2209 /* Helper function for predicate_statements.  STMT is a potentially-trapping
2210    arithmetic operation that needs to be predicated by MASK, an SSA_NAME that
2211    has value COND.  Return a statement that does so.  SSA_NAMES is the set of
2212    SSA names defined earlier in STMT's block.  */
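/* For instance (a sketch), a division

     lhs_6 = a_4 / b_5;

   predicated by MASK becomes roughly

     lhs_6 = .COND_DIV (MASK, a_4, b_5, <else value>);

   where the else value comes from a redundant COND_EXPR use of lhs_6 if one
   is available, and otherwise from the target's preferred else value.  */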
2213 
2214 static gimple *
2215 predicate_rhs_code (gassign *stmt, tree mask, tree cond,
2216 		    hash_set<tree_ssa_name_hash> *ssa_names)
2217 {
2218   tree lhs = gimple_assign_lhs (stmt);
2219   tree_code code = gimple_assign_rhs_code (stmt);
2220   unsigned int nops = gimple_num_ops (stmt);
2221   internal_fn cond_fn = get_conditional_internal_fn (code);
2222 
2223   /* Construct the arguments to the conditional internal function.   */
2224   auto_vec<tree, 8> args;
2225   args.safe_grow (nops + 1);
2226   args[0] = mask;
2227   for (unsigned int i = 1; i < nops; ++i)
2228     args[i] = gimple_op (stmt, i);
2229   args[nops] = NULL_TREE;
2230 
2231   /* Look for uses of the result to see whether they are COND_EXPRs that can
2232      be folded into the conditional call.  */
2233   imm_use_iterator imm_iter;
2234   gimple *use_stmt;
2235   FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
2236     {
2237       tree new_else = check_redundant_cond_expr (use_stmt, mask, cond, lhs);
2238       if (new_else && value_available_p (stmt, ssa_names, new_else))
2239 	{
2240 	  if (!args[nops])
2241 	    args[nops] = new_else;
2242 	  if (operand_equal_p (new_else, args[nops], 0))
2243 	    {
2244 	      /* We have:
2245 
2246 		   LHS = IFN_COND (MASK, ..., ELSE);
2247 		   X = MASK ? LHS : ELSE;
2248 
2249 		 which makes X equivalent to LHS.  */
2250 	      tree use_lhs = gimple_assign_lhs (use_stmt);
2251 	      redundant_ssa_names.safe_push (std::make_pair (use_lhs, lhs));
2252 	    }
2253 	}
2254     }
2255   if (!args[nops])
2256     args[nops] = targetm.preferred_else_value (cond_fn, TREE_TYPE (lhs),
2257 					       nops - 1, &args[1]);
2258 
2259   /* Create and insert the call.  */
2260   gcall *new_stmt = gimple_build_call_internal_vec (cond_fn, args);
2261   gimple_call_set_lhs (new_stmt, lhs);
2262   gimple_call_set_nothrow (new_stmt, true);
2263 
2264   return new_stmt;
2265 }
2266 
2267 /* Predicate each write to memory in LOOP.
2268 
2269    This function transforms control flow constructs containing memory
2270    writes of the form:
2271 
2272    | for (i = 0; i < N; i++)
2273    |   if (cond)
2274    |     A[i] = expr;
2275 
2276    into the following form that does not contain control flow:
2277 
2278    | for (i = 0; i < N; i++)
2279    |   A[i] = cond ? expr : A[i];
2280 
2281    The original CFG looks like this:
2282 
2283    | bb_0
2284    |   i = 0
2285    | end_bb_0
2286    |
2287    | bb_1
2288    |   if (i < N) goto bb_5 else goto bb_2
2289    | end_bb_1
2290    |
2291    | bb_2
2292    |   cond = some_computation;
2293    |   if (cond) goto bb_3 else goto bb_4
2294    | end_bb_2
2295    |
2296    | bb_3
2297    |   A[i] = expr;
2298    |   goto bb_4
2299    | end_bb_3
2300    |
2301    | bb_4
2302    |   goto bb_1
2303    | end_bb_4
2304 
2305    insert_gimplified_predicates inserts the computation of the COND
2306    expression at the beginning of the destination basic block:
2307 
2308    | bb_0
2309    |   i = 0
2310    | end_bb_0
2311    |
2312    | bb_1
2313    |   if (i < N) goto bb_5 else goto bb_2
2314    | end_bb_1
2315    |
2316    | bb_2
2317    |   cond = some_computation;
2318    |   if (cond) goto bb_3 else goto bb_4
2319    | end_bb_2
2320    |
2321    | bb_3
2322    |   cond = some_computation;
2323    |   A[i] = expr;
2324    |   goto bb_4
2325    | end_bb_3
2326    |
2327    | bb_4
2328    |   goto bb_1
2329    | end_bb_4
2330 
2331    predicate_statements then predicates the memory write as follows:
2332 
2333    | bb_0
2334    |   i = 0
2335    | end_bb_0
2336    |
2337    | bb_1
2338    |   if (i < N) goto bb_5 else goto bb_2
2339    | end_bb_1
2340    |
2341    | bb_2
2342    |   if (cond) goto bb_3 else goto bb_4
2343    | end_bb_2
2344    |
2345    | bb_3
2346    |   cond = some_computation;
2347    |   A[i] = cond ? expr : A[i];
2348    |   goto bb_4
2349    | end_bb_3
2350    |
2351    | bb_4
2352    |   goto bb_1
2353    | end_bb_4
2354 
2355    and finally combine_blocks removes the basic block boundaries making
2356    the loop vectorizable:
2357 
2358    | bb_0
2359    |   i = 0
2360    |   if (i < N) goto bb_5 else goto bb_1
2361    | end_bb_0
2362    |
2363    | bb_1
2364    |   cond = some_computation;
2365    |   A[i] = cond ? expr : A[i];
2366    |   if (i < N) goto bb_5 else goto bb_4
2367    | end_bb_1
2368    |
2369    | bb_4
2370    |   goto bb_1
2371    | end_bb_4
2372 */
2373 
2374 static void
2375 predicate_statements (loop_p loop)
2376 {
2377   unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2378   auto_vec<int, 1> vect_sizes;
2379   auto_vec<tree, 1> vect_masks;
2380   hash_set<tree_ssa_name_hash> ssa_names;
2381 
2382   for (i = 1; i < orig_loop_num_nodes; i++)
2383     {
2384       gimple_stmt_iterator gsi;
2385       basic_block bb = ifc_bbs[i];
2386       tree cond = bb_predicate (bb);
2387       bool swap;
2388       int index;
2389 
2390       if (is_true_predicate (cond))
2391 	continue;
2392 
2393       swap = false;
2394       if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2395 	{
2396 	  swap = true;
2397 	  cond = TREE_OPERAND (cond, 0);
2398 	}
2399 
2400       vect_sizes.truncate (0);
2401       vect_masks.truncate (0);
2402 
2403       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
2404 	{
2405 	  gassign *stmt = dyn_cast <gassign *> (gsi_stmt (gsi));
2406 	  if (!stmt)
2407 	    ;
2408 	  else if (is_false_predicate (cond)
2409 		   && gimple_vdef (stmt))
2410 	    {
2411 	      unlink_stmt_vdef (stmt);
2412 	      gsi_remove (&gsi, true);
2413 	      release_defs (stmt);
2414 	      continue;
2415 	    }
2416 	  else if (gimple_plf (stmt, GF_PLF_2))
2417 	    {
2418 	      tree lhs = gimple_assign_lhs (stmt);
2419 	      tree mask;
2420 	      gimple *new_stmt;
2421 	      gimple_seq stmts = NULL;
2422 	      machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
2423 	      /* We checked before setting GF_PLF_2 that an equivalent
2424 		 integer mode exists.  */
2425 	      int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
2426 	      if (!vect_sizes.is_empty ()
2427 		  && (index = mask_exists (bitsize, vect_sizes)) != -1)
2428 		/* Use created mask.  */
2429 		mask = vect_masks[index];
2430 	      else
2431 		{
2432 		  if (COMPARISON_CLASS_P (cond))
2433 		    mask = gimple_build (&stmts, TREE_CODE (cond),
2434 					 boolean_type_node,
2435 					 TREE_OPERAND (cond, 0),
2436 					 TREE_OPERAND (cond, 1));
2437 		  else
2438 		    mask = cond;
2439 
2440 		  if (swap)
2441 		    {
2442 		      tree true_val
2443 			= constant_boolean_node (true, TREE_TYPE (mask));
2444 		      mask = gimple_build (&stmts, BIT_XOR_EXPR,
2445 					   TREE_TYPE (mask), mask, true_val);
2446 		    }
2447 		  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2448 
2449 		  /* Save mask and its size for further use.  */
2450 		  vect_sizes.safe_push (bitsize);
2451 		  vect_masks.safe_push (mask);
2452 		}
2453 	      if (gimple_assign_single_p (stmt))
2454 		new_stmt = predicate_load_or_store (&gsi, stmt, mask);
2455 	      else
2456 		new_stmt = predicate_rhs_code (stmt, mask, cond, &ssa_names);
2457 
2458 	      gsi_replace (&gsi, new_stmt, true);
2459 	    }
2460 	  else if (gimple_vdef (stmt))
2461 	    {
2462 	      tree lhs = gimple_assign_lhs (stmt);
2463 	      tree rhs = gimple_assign_rhs1 (stmt);
2464 	      tree type = TREE_TYPE (lhs);
2465 
2466 	      lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2467 	      rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2468 	      if (swap)
2469 		std::swap (lhs, rhs);
2470 	      cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2471 						 is_gimple_condexpr, NULL_TREE,
2472 						 true, GSI_SAME_STMT);
2473 	      rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2474 	      gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2475 	      update_stmt (stmt);
2476 	    }
2477 	  tree lhs = gimple_get_lhs (gsi_stmt (gsi));
2478 	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
2479 	    ssa_names.add (lhs);
2480 	  gsi_next (&gsi);
2481 	}
2482       ssa_names.empty ();
2483     }
2484 }
2485 
2486 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2487    other than the exit and latch of the LOOP.  Also resets the
2488    GIMPLE_DEBUG information.  */
2489 
2490 static void
2491 remove_conditions_and_labels (loop_p loop)
2492 {
2493   gimple_stmt_iterator gsi;
2494   unsigned int i;
2495 
2496   for (i = 0; i < loop->num_nodes; i++)
2497     {
2498       basic_block bb = ifc_bbs[i];
2499 
2500       if (bb_with_exit_edge_p (loop, bb)
2501 	  || bb == loop->latch)
2502 	continue;
2503 
2504       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2505 	switch (gimple_code (gsi_stmt (gsi)))
2506 	  {
2507 	  case GIMPLE_COND:
2508 	  case GIMPLE_LABEL:
2509 	    gsi_remove (&gsi, true);
2510 	    break;
2511 
2512 	  case GIMPLE_DEBUG:
2513 	    /* ??? Should there be conditional GIMPLE_DEBUG_BINDs?  */
2514 	    if (gimple_debug_bind_p (gsi_stmt (gsi)))
2515 	      {
2516 		gimple_debug_bind_reset_value (gsi_stmt (gsi));
2517 		update_stmt (gsi_stmt (gsi));
2518 	      }
2519 	    gsi_next (&gsi);
2520 	    break;
2521 
2522 	  default:
2523 	    gsi_next (&gsi);
2524 	  }
2525     }
2526 }
2527 
2528 /* Combine all the basic blocks from LOOP into one or two super basic
2529    blocks.  Replace PHI nodes with conditional modify expressions.  */
2530 
2531 static void
2532 combine_blocks (class loop *loop)
2533 {
2534   basic_block bb, exit_bb, merge_target_bb;
2535   unsigned int orig_loop_num_nodes = loop->num_nodes;
2536   unsigned int i;
2537   edge e;
2538   edge_iterator ei;
2539 
2540   remove_conditions_and_labels (loop);
2541   insert_gimplified_predicates (loop);
2542   predicate_all_scalar_phis (loop);
2543 
2544   if (need_to_predicate)
2545     predicate_statements (loop);
2546 
2547   /* Merge basic blocks: first remove all the edges in the loop,
2548      except for those from the exit block.  */
2549   exit_bb = NULL;
2550   bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2551   for (i = 0; i < orig_loop_num_nodes; i++)
2552     {
2553       bb = ifc_bbs[i];
2554       predicated[i] = !is_true_predicate (bb_predicate (bb));
2555       free_bb_predicate (bb);
2556       if (bb_with_exit_edge_p (loop, bb))
2557 	{
2558 	  gcc_assert (exit_bb == NULL);
2559 	  exit_bb = bb;
2560 	}
2561     }
2562   gcc_assert (exit_bb != loop->latch);
2563 
2564   for (i = 1; i < orig_loop_num_nodes; i++)
2565     {
2566       bb = ifc_bbs[i];
2567 
2568       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2569 	{
2570 	  if (e->src == exit_bb)
2571 	    ei_next (&ei);
2572 	  else
2573 	    remove_edge (e);
2574 	}
2575     }
2576 
2577   if (exit_bb != NULL)
2578     {
2579       if (exit_bb != loop->header)
2580 	{
2581 	  /* Connect this node to loop header.  */
2582 	  make_single_succ_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2583 	  set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2584 	}
2585 
2586       /* Redirect non-exit edges to loop->latch.  */
2587       FOR_EACH_EDGE (e, ei, exit_bb->succs)
2588 	{
2589 	  if (!loop_exit_edge_p (loop, e))
2590 	    redirect_edge_and_branch (e, loop->latch);
2591 	}
2592       set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2593     }
2594   else
2595     {
2596       /* If the loop does not have an exit, reconnect header and latch.  */
2597       make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2598       set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2599     }
2600 
2601   merge_target_bb = loop->header;
2602 
2603   /* Get at the virtual def valid for uses starting at the first block
2604      we merge into the header.  Without a virtual PHI the loop has the
2605      same virtual use on all stmts.  */
2606   gphi *vphi = get_virtual_phi (loop->header);
2607   tree last_vdef = NULL_TREE;
2608   if (vphi)
2609     {
2610       last_vdef = gimple_phi_result (vphi);
2611       for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
2612 	   ! gsi_end_p (gsi); gsi_next (&gsi))
2613 	if (gimple_vdef (gsi_stmt (gsi)))
2614 	  last_vdef = gimple_vdef (gsi_stmt (gsi));
2615     }
2616   for (i = 1; i < orig_loop_num_nodes; i++)
2617     {
2618       gimple_stmt_iterator gsi;
2619       gimple_stmt_iterator last;
2620 
2621       bb = ifc_bbs[i];
2622 
2623       if (bb == exit_bb || bb == loop->latch)
2624 	continue;
2625 
2626       /* We release virtual PHIs late because we have to propagate them
2627          out using the current VUSE.  The def might be the one used
2628 	 after the loop.  */
2629       vphi = get_virtual_phi (bb);
2630       if (vphi)
2631 	{
2632 	  /* When there are only loads inside the loop a stray virtual
2633 	     PHI merging the uses can appear; update last_vdef from
2634 	     it.  */
2635 	  if (!last_vdef)
2636 	    last_vdef = gimple_phi_arg_def (vphi, 0);
2637 	  imm_use_iterator iter;
2638 	  use_operand_p use_p;
2639 	  gimple *use_stmt;
2640 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2641 	    {
2642 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2643 		SET_USE (use_p, last_vdef);
2644 	    }
2645 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (vphi)))
2646 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (last_vdef) = 1;
2647 	  gsi = gsi_for_stmt (vphi);
2648 	  remove_phi_node (&gsi, true);
2649 	}
2650 
2651       /* Make the stmts members of loop->header and clear range info from all
2652 	 stmts in BB, which are now no longer executed conditionally on a
2653 	 predicate the info could have been derived from.  */
2654       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2655 	{
2656 	  gimple *stmt = gsi_stmt (gsi);
2657 	  gimple_set_bb (stmt, merge_target_bb);
2658 	  /* Update virtual operands.  */
2659 	  if (last_vdef)
2660 	    {
2661 	      use_operand_p use_p = ssa_vuse_operand (stmt);
2662 	      if (use_p
2663 		  && USE_FROM_PTR (use_p) != last_vdef)
2664 		SET_USE (use_p, last_vdef);
2665 	      if (gimple_vdef (stmt))
2666 		last_vdef = gimple_vdef (stmt);
2667 	    }
2668 	  else
2669 	    /* If this is the first load we arrive at update last_vdef
2670 	       so we handle stray PHIs correctly.  */
2671 	    last_vdef = gimple_vuse (stmt);
2672 	  if (predicated[i])
2673 	    {
2674 	      ssa_op_iter i;
2675 	      tree op;
2676 	      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2677 		reset_flow_sensitive_info (op);
2678 	    }
2679 	}
2680 
2681       /* Update stmt list.  */
2682       last = gsi_last_bb (merge_target_bb);
2683       gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
2684       set_bb_seq (bb, NULL);
2685 
2686       delete_basic_block (bb);
2687     }
2688 
2689   /* If possible, merge loop header to the block with the exit edge.
2690      This reduces the number of basic blocks to two, to please the
2691      vectorizer that handles only loops with two nodes.  */
2692   if (exit_bb
2693       && exit_bb != loop->header)
2694     {
2695       /* We release virtual PHIs late because we have to propagate them
2696          out using the current VUSE.  The def might be the one used
2697 	 after the loop.  */
2698       vphi = get_virtual_phi (exit_bb);
2699       if (vphi)
2700 	{
2701 	  imm_use_iterator iter;
2702 	  use_operand_p use_p;
2703 	  gimple *use_stmt;
2704 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2705 	    {
2706 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2707 		SET_USE (use_p, last_vdef);
2708 	    }
2709 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (vphi)))
2710 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (last_vdef) = 1;
2711 	  gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
2712 	  remove_phi_node (&gsi, true);
2713 	}
2714 
2715       if (can_merge_blocks_p (loop->header, exit_bb))
2716 	merge_blocks (loop->header, exit_bb);
2717     }
2718 
2719   free (ifc_bbs);
2720   ifc_bbs = NULL;
2721   free (predicated);
2722 }
2723 
2724 /* Version LOOP before if-converting it; the original loop
2725    will be if-converted, the new copy of the loop will not,
2726    and the LOOP_VECTORIZED internal call will be guarding which
2727    loop to execute.  The vectorizer pass will fold this
2728    internal call into either true or false.
2729 
2730    Note that this function intentionally invalidates profile.  Both edges
2731    out of LOOP_VECTORIZED must have 100% probability so the profile remains
2732    consistent after the condition is folded in the vectorizer.  */
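/* A sketch of the result (names made up):

     _t = .LOOP_VECTORIZED (<original loop number>, <copy loop number>);
     if (_t)
       <original LOOP, subsequently if-converted>
     else
       <new copy of LOOP, left untouched and marked dont_vectorize>  */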
2733 
2734 static class loop *
2735 version_loop_for_if_conversion (class loop *loop, vec<gimple *> *preds)
2736 {
2737   basic_block cond_bb;
2738   tree cond = make_ssa_name (boolean_type_node);
2739   class loop *new_loop;
2740   gimple *g;
2741   gimple_stmt_iterator gsi;
2742   unsigned int save_length;
2743 
2744   g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2745 				  build_int_cst (integer_type_node, loop->num),
2746 				  integer_zero_node);
2747   gimple_call_set_lhs (g, cond);
2748 
2749   /* Save BB->aux around loop_version as that uses the same field.  */
2750   save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
2751   void **saved_preds = XALLOCAVEC (void *, save_length);
2752   for (unsigned i = 0; i < save_length; i++)
2753     saved_preds[i] = ifc_bbs[i]->aux;
2754 
2755   initialize_original_copy_tables ();
2756   /* At this point we invalidate profile consistency until IFN_LOOP_VECTORIZED
2757      is re-merged in the vectorizer.  */
2758   new_loop = loop_version (loop, cond, &cond_bb,
2759 			   profile_probability::always (),
2760 			   profile_probability::always (),
2761 			   profile_probability::always (),
2762 			   profile_probability::always (), true);
2763   free_original_copy_tables ();
2764 
2765   for (unsigned i = 0; i < save_length; i++)
2766     ifc_bbs[i]->aux = saved_preds[i];
2767 
2768   if (new_loop == NULL)
2769     return NULL;
2770 
2771   new_loop->dont_vectorize = true;
2772   new_loop->force_vectorize = false;
2773   gsi = gsi_last_bb (cond_bb);
2774   gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2775   if (preds)
2776     preds->safe_push (g);
2777   gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2778   update_ssa (TODO_update_ssa);
2779   return new_loop;
2780 }
2781 
2782 /* Return true when LOOP satisfies the following conditions that will
2783    allow it to be recognized by the vectorizer for outer-loop
2784    vectorization:
2785     - The loop is not the root node of the loop tree.
2786     - The loop has exactly one inner loop.
2787     - The loop has a single exit.
2788     - The loop header has a single successor, which is the inner
2789       loop header.
2790     - Each of the inner and outer loop latches has a single
2791       predecessor.
2792     - The loop exit block has a single predecessor, which is the
2793       inner loop's exit block.  */
2794 
2795 static bool
2796 versionable_outer_loop_p (class loop *loop)
2797 {
2798   if (!loop_outer (loop)
2799       || loop->dont_vectorize
2800       || !loop->inner
2801       || loop->inner->next
2802       || !single_exit (loop)
2803       || !single_succ_p (loop->header)
2804       || single_succ (loop->header) != loop->inner->header
2805       || !single_pred_p (loop->latch)
2806       || !single_pred_p (loop->inner->latch))
2807     return false;
2808 
2809   basic_block outer_exit = single_pred (loop->latch);
2810   basic_block inner_exit = single_pred (loop->inner->latch);
2811 
2812   if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2813     return false;
2814 
2815   if (dump_file)
2816     fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2817 
2818   return true;
2819 }
2820 
2821 /* Performs splitting of critical edges.  Skip splitting and return false
2822    if LOOP will not be converted because:
2823 
2824      - LOOP is not well formed.
2825      - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.
2826 
2827    The last restriction applies only if AGGRESSIVE_IF_CONV is false.  */
2828 
2829 static bool
2830 ifcvt_split_critical_edges (class loop *loop, bool aggressive_if_conv)
2831 {
2832   basic_block *body;
2833   basic_block bb;
2834   unsigned int num = loop->num_nodes;
2835   unsigned int i;
2836   gimple *stmt;
2837   edge e;
2838   edge_iterator ei;
2839   auto_vec<edge> critical_edges;
2840 
2841   /* Loop is not well formed.  */
2842   if (num <= 2 || loop->inner || !single_exit (loop))
2843     return false;
2844 
2845   body = get_loop_body (loop);
2846   for (i = 0; i < num; i++)
2847     {
2848       bb = body[i];
2849       if (!aggressive_if_conv
2850 	  && phi_nodes (bb)
2851 	  && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
2852 	{
2853 	  if (dump_file && (dump_flags & TDF_DETAILS))
2854 	    fprintf (dump_file,
2855 		     "BB %d has complicated PHI with more than %u args.\n",
2856 		     bb->index, MAX_PHI_ARG_NUM);
2857 
2858 	  free (body);
2859 	  return false;
2860 	}
2861       if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
2862 	continue;
2863 
2864       stmt = last_stmt (bb);
2865       /* Skip basic blocks not ending with conditional branch.  */
2866       if (!stmt || gimple_code (stmt) != GIMPLE_COND)
2867 	continue;
2868 
2869       FOR_EACH_EDGE (e, ei, bb->succs)
2870 	if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2871 	  critical_edges.safe_push (e);
2872     }
2873   free (body);
2874 
2875   while (critical_edges.length () > 0)
2876     {
2877       e = critical_edges.pop ();
2878       /* Don't split if bb can be predicated along non-critical edge.  */
2879       if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
2880 	split_edge (e);
2881     }
2882 
2883   return true;
2884 }
2885 
2886 /* Delete redundant statements produced by predication which would otherwise
2887    prevent loop vectorization.  */
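/* For instance (a sketch), a gimplified predicate computation such as

     _7 = _5 | _6;

   whose only consumers were removed GIMPLE_CONDs or were folded away is left
   without uses in the single remaining loop body block and is deleted
   here.  */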
2888 
2889 static void
2890 ifcvt_local_dce (class loop *loop)
2891 {
2892   gimple *stmt;
2893   gimple *stmt1;
2894   gimple *phi;
2895   gimple_stmt_iterator gsi;
2896   auto_vec<gimple *> worklist;
2897   enum gimple_code code;
2898   use_operand_p use_p;
2899   imm_use_iterator imm_iter;
2900 
2901   /* The loop has a single BB only.  */
2902   basic_block bb = loop->header;
2903   tree latch_vdef = NULL_TREE;
2904 
2905   worklist.create (64);
2906   /* Consider all phi as live statements.  */
2907   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2908     {
2909       phi = gsi_stmt (gsi);
2910       gimple_set_plf (phi, GF_PLF_2, true);
2911       worklist.safe_push (phi);
2912       if (virtual_operand_p (gimple_phi_result (phi)))
2913 	latch_vdef = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2914     }
2915   /* Consider load/store statements, CALL and COND as live.  */
2916   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2917     {
2918       stmt = gsi_stmt (gsi);
2919       if (is_gimple_debug (stmt))
2920 	{
2921 	  gimple_set_plf (stmt, GF_PLF_2, true);
2922 	  continue;
2923 	}
2924       if (gimple_store_p (stmt) || gimple_assign_load_p (stmt))
2925 	{
2926 	  gimple_set_plf (stmt, GF_PLF_2, true);
2927 	  worklist.safe_push (stmt);
2928 	  continue;
2929 	}
2930       code = gimple_code (stmt);
2931       if (code == GIMPLE_COND || code == GIMPLE_CALL)
2932 	{
2933 	  gimple_set_plf (stmt, GF_PLF_2, true);
2934 	  worklist.safe_push (stmt);
2935 	  continue;
2936 	}
2937       gimple_set_plf (stmt, GF_PLF_2, false);
2938 
2939       if (code == GIMPLE_ASSIGN)
2940 	{
2941 	  tree lhs = gimple_assign_lhs (stmt);
2942 	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2943 	    {
2944 	      stmt1 = USE_STMT (use_p);
2945 	      if (!is_gimple_debug (stmt1) && gimple_bb (stmt1) != bb)
2946 		{
2947 		  gimple_set_plf (stmt, GF_PLF_2, true);
2948 		  worklist.safe_push (stmt);
2949 		  break;
2950 		}
2951 	    }
2952 	}
2953     }
2954   /* Propagate liveness through arguments of live stmt.  */
2955   while (worklist.length () > 0)
2956     {
2957       ssa_op_iter iter;
2958       use_operand_p use_p;
2959       tree use;
2960 
2961       stmt = worklist.pop ();
2962       FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2963 	{
2964 	  use = USE_FROM_PTR (use_p);
2965 	  if (TREE_CODE (use) != SSA_NAME)
2966 	    continue;
2967 	  stmt1 = SSA_NAME_DEF_STMT (use);
2968 	  if (gimple_bb (stmt1) != bb || gimple_plf (stmt1, GF_PLF_2))
2969 	    continue;
2970 	  gimple_set_plf (stmt1, GF_PLF_2, true);
2971 	  worklist.safe_push (stmt1);
2972 	}
2973     }
2974   /* Delete dead statements.  */
2975   gsi = gsi_last_bb (bb);
2976   while (!gsi_end_p (gsi))
2977     {
2978       gimple_stmt_iterator gsiprev = gsi;
2979       gsi_prev (&gsiprev);
2980       stmt = gsi_stmt (gsi);
2981       if (gimple_store_p (stmt))
2982 	{
2983 	  tree lhs = gimple_get_lhs (stmt);
2984 	  ao_ref write;
2985 	  ao_ref_init (&write, lhs);
2986 
2987 	  if (dse_classify_store (&write, stmt, false, NULL, NULL, latch_vdef)
2988 	      == DSE_STORE_DEAD)
2989 	    delete_dead_or_redundant_assignment (&gsi, "dead");
2990 	  gsi = gsiprev;
2991 	  continue;
2992 	}
2993 
2994       if (gimple_plf (stmt, GF_PLF_2))
2995 	{
2996 	  gsi = gsiprev;
2997 	  continue;
2998 	}
2999       if (dump_file && (dump_flags & TDF_DETAILS))
3000 	{
3001 	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
3002 	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3003 	}
3004       gsi_remove (&gsi, true);
3005       release_defs (stmt);
3006       gsi = gsiprev;
3007     }
3008 }
3009 
3010 /* If-convert LOOP when it is legal.  For the moment this pass has no
3011    profitability analysis.  Returns non-zero todo flags when something
3012    changed.  */
3013 
3014 unsigned int
3015 tree_if_conversion (class loop *loop, vec<gimple *> *preds)
3016 {
3017   unsigned int todo = 0;
3018   bool aggressive_if_conv;
3019   class loop *rloop;
3020   bitmap exit_bbs;
3021 
3022  again:
3023   rloop = NULL;
3024   ifc_bbs = NULL;
3025   need_to_predicate = false;
3026   any_complicated_phi = false;
3027 
3028   /* Apply more aggressive if-conversion when loop or its outer loop were
3029      marked with simd pragma.  When that's the case, we try to if-convert
3030      loop containing PHIs with more than MAX_PHI_ARG_NUM arguments.  */
3031   aggressive_if_conv = loop->force_vectorize;
3032   if (!aggressive_if_conv)
3033     {
3034       class loop *outer_loop = loop_outer (loop);
3035       if (outer_loop && outer_loop->force_vectorize)
3036 	aggressive_if_conv = true;
3037     }
3038 
3039   if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
3040     goto cleanup;
3041 
3042   if (!if_convertible_loop_p (loop)
3043       || !dbg_cnt (if_conversion_tree))
3044     goto cleanup;
3045 
3046   if ((need_to_predicate || any_complicated_phi)
3047       && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
3048 	  || loop->dont_vectorize))
3049     goto cleanup;
3050 
3051   /* Since we have no cost model, always version loops unless the user
3052      specified -ftree-loop-if-convert or unless versioning is required.
3053      Either version this loop, or if the pattern is right for outer-loop
3054      vectorization, version the outer loop.  In the latter case we will
3055      still if-convert the original inner loop.  */
3056   if (need_to_predicate
3057       || any_complicated_phi
3058       || flag_tree_loop_if_convert != 1)
3059     {
3060       class loop *vloop
3061 	= (versionable_outer_loop_p (loop_outer (loop))
3062 	   ? loop_outer (loop) : loop);
3063       class loop *nloop = version_loop_for_if_conversion (vloop, preds);
3064       if (nloop == NULL)
3065 	goto cleanup;
3066       if (vloop != loop)
3067 	{
3068 	  /* If versionable_outer_loop_p decided to version the
3069 	     outer loop, version also the inner loop of the non-vectorized
3070 	     loop copy.  So we transform:
3071 	      loop1
3072 		loop2
3073 	     into:
3074 	      if (LOOP_VECTORIZED (1, 3))
3075 		{
3076 		  loop1
3077 		    loop2
3078 		}
3079 	      else
3080 		loop3 (copy of loop1)
3081 		  if (LOOP_VECTORIZED (4, 5))
3082 		    loop4 (copy of loop2)
3083 		  else
3084 		    loop5 (copy of loop4)  */
3085 	  gcc_assert (nloop->inner && nloop->inner->next == NULL);
3086 	  rloop = nloop->inner;
3087 	}
3088     }
3089 
3090   /* Now all statements are if-convertible.  Combine all the basic
3091      blocks into one huge basic block doing the if-conversion
3092      on-the-fly.  */
3093   combine_blocks (loop);
3094 
3095   /* Perform local CSE, this esp. helps the vectorizer analysis if loads
3096      and stores are involved.  CSE only the loop body, not the entry
3097      PHIs, those are to be kept in sync with the non-if-converted copy.
3098      ???  We'll still keep dead stores though.  */
3099   exit_bbs = BITMAP_ALLOC (NULL);
3100   bitmap_set_bit (exit_bbs, single_exit (loop)->dest->index);
3101   bitmap_set_bit (exit_bbs, loop->latch->index);
3102 
3103   std::pair <tree, tree> *name_pair;
3104   unsigned ssa_names_idx;
3105   FOR_EACH_VEC_ELT (redundant_ssa_names, ssa_names_idx, name_pair)
3106     replace_uses_by (name_pair->first, name_pair->second);
3107   redundant_ssa_names.release ();
3108 
3109   todo |= do_rpo_vn (cfun, loop_preheader_edge (loop), exit_bbs);
3110 
3111   /* Delete dead predicate computations.  */
3112   ifcvt_local_dce (loop);
3113   BITMAP_FREE (exit_bbs);
3114 
3115   todo |= TODO_cleanup_cfg;
3116 
3117  cleanup:
3118   if (ifc_bbs)
3119     {
3120       unsigned int i;
3121 
3122       for (i = 0; i < loop->num_nodes; i++)
3123 	free_bb_predicate (ifc_bbs[i]);
3124 
3125       free (ifc_bbs);
3126       ifc_bbs = NULL;
3127     }
3128   if (rloop != NULL)
3129     {
3130       loop = rloop;
3131       goto again;
3132     }
3133 
3134   return todo;
3135 }
3136 
3137 /* Tree if-conversion pass management.  */
3138 
3139 namespace {
3140 
3141 const pass_data pass_data_if_conversion =
3142 {
3143   GIMPLE_PASS, /* type */
3144   "ifcvt", /* name */
3145   OPTGROUP_NONE, /* optinfo_flags */
3146   TV_TREE_LOOP_IFCVT, /* tv_id */
3147   ( PROP_cfg | PROP_ssa ), /* properties_required */
3148   0, /* properties_provided */
3149   0, /* properties_destroyed */
3150   0, /* todo_flags_start */
3151   0, /* todo_flags_finish */
3152 };
3153 
3154 class pass_if_conversion : public gimple_opt_pass
3155 {
3156 public:
3157   pass_if_conversion (gcc::context *ctxt)
3158     : gimple_opt_pass (pass_data_if_conversion, ctxt)
3159   {}
3160 
3161   /* opt_pass methods: */
3162   virtual bool gate (function *);
3163   virtual unsigned int execute (function *);
3164 
3165 }; // class pass_if_conversion
3166 
3167 bool
3168 pass_if_conversion::gate (function *fun)
3169 {
3170   return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
3171 	   && flag_tree_loop_if_convert != 0)
3172 	  || flag_tree_loop_if_convert == 1);
3173 }
3174 
3175 unsigned int
3176 pass_if_conversion::execute (function *fun)
3177 {
3178   class loop *loop;
3179   unsigned todo = 0;
3180 
3181   if (number_of_loops (fun) <= 1)
3182     return 0;
3183 
3184   auto_vec<gimple *> preds;
3185   FOR_EACH_LOOP (loop, 0)
3186     if (flag_tree_loop_if_convert == 1
3187 	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
3188 	    && !loop->dont_vectorize))
3189       todo |= tree_if_conversion (loop, &preds);
3190 
3191   if (todo)
3192     {
3193       free_numbers_of_iterations_estimates (fun);
3194       scev_reset ();
3195     }
3196 
3197   if (flag_checking)
3198     {
3199       basic_block bb;
3200       FOR_EACH_BB_FN (bb, fun)
3201 	gcc_assert (!bb->aux);
3202     }
3203 
3204   /* Perform IL update now, it might elide some loops.  */
3205   if (todo & TODO_cleanup_cfg)
3206     {
3207       cleanup_tree_cfg ();
3208       if (need_ssa_update_p (fun))
3209 	todo |= TODO_update_ssa;
3210     }
3211   if (todo & TODO_update_ssa_any)
3212     update_ssa (todo & TODO_update_ssa_any);
3213 
3214   /* If if-conversion elided the loop, fall back to the original one.  */
3215   for (unsigned i = 0; i < preds.length (); ++i)
3216     {
3217       gimple *g = preds[i];
3218       if (!gimple_bb (g))
3219 	continue;
3220       unsigned ifcvt_loop = tree_to_uhwi (gimple_call_arg (g, 0));
3221       if (!get_loop (fun, ifcvt_loop))
3222 	{
3223 	  if (dump_file)
3224 	    fprintf (dump_file, "If-converted loop vanished\n");
3225 	  fold_loop_internal_call (g, boolean_false_node);
3226 	}
3227     }
3228 
3229   return 0;
3230 }
3231 
3232 } // anon namespace
3233 
3234 gimple_opt_pass *
3235 make_pass_if_conversion (gcc::context *ctxt)
3236 {
3237   return new pass_if_conversion (ctxt);
3238 }
3239