1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2    a jump table.
3    Copyright (C) 2006-2019 Free Software Foundation, Inc.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA.  */
21 
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23    load, or a series of bit-test-and-branch expressions.  */
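/* As an illustrative sketch (not the exact GIMPLE this pass emits; the
   array name CSWTCH matches the temporaries created below), a switch like

     switch (i)
       {
       case 3: r = 4; break;
       case 4: r = 9; break;
       case 5: r = 16; break;
       default: r = 0; break;
       }

   can be converted into a bounds check plus a load from a static array:

     static const int CSWTCH1[] = { 4, 9, 16 };
     if ((unsigned) (i - 3) <= 2)
       r = CSWTCH1[i - 3];
     else
       r = 0;  */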
24 
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "params.h"
40 #include "fold-const.h"
41 #include "varasm.h"
42 #include "stor-layout.h"
43 #include "cfganal.h"
44 #include "gimplify.h"
45 #include "gimple-iterator.h"
46 #include "gimplify-me.h"
47 #include "gimple-fold.h"
48 #include "tree-cfg.h"
49 #include "cfgloop.h"
50 #include "alloc-pool.h"
51 #include "target.h"
52 #include "tree-into-ssa.h"
53 #include "omp-general.h"
54 
55 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
56    type in the GIMPLE type system that is language-independent?  */
57 #include "langhooks.h"
58 
59 #include "tree-switch-conversion.h"
60 
61 using namespace tree_switch_conversion;
62 
63 /* Constructor.  */
64 
65 switch_conversion::switch_conversion (): m_final_bb (NULL), m_other_count (),
66   m_constructors (NULL), m_default_values (NULL),
67   m_arr_ref_first (NULL), m_arr_ref_last (NULL),
68   m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
69 {
70 }
71 
72 /* Collect information about the SWTCH statement.  */
73 
74 void
75 switch_conversion::collect (gswitch *swtch)
76 {
77   unsigned int branch_num = gimple_switch_num_labels (swtch);
78   tree min_case, max_case;
79   unsigned int i;
80   edge e, e_default, e_first;
81   edge_iterator ei;
82 
83   m_switch = swtch;
84 
85   /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
86      is a default label which is the first in the vector.
87      Collect the bits we can deduce from the CFG.  */
88   m_index_expr = gimple_switch_index (swtch);
89   m_switch_bb = gimple_bb (swtch);
90   e_default = gimple_switch_default_edge (cfun, swtch);
91   m_default_bb = e_default->dest;
92   m_default_prob = e_default->probability;
93   m_default_count = e_default->count ();
94   FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
95     if (e != e_default)
96       m_other_count += e->count ();
97 
98   /* Get upper and lower bounds of case values, and the covered range.  */
99   min_case = gimple_switch_label (swtch, 1);
100   max_case = gimple_switch_label (swtch, branch_num - 1);
101 
102   m_range_min = CASE_LOW (min_case);
103   if (CASE_HIGH (max_case) != NULL_TREE)
104     m_range_max = CASE_HIGH (max_case);
105   else
106     m_range_max = CASE_LOW (max_case);
107 
108   m_contiguous_range = true;
109   tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
110   for (i = 2; i < branch_num; i++)
111     {
112       tree elt = gimple_switch_label (swtch, i);
113       if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
114 	{
115 	  m_contiguous_range = false;
116 	  break;
117 	}
118       last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
119     }
120 
121   if (m_contiguous_range)
122     e_first = gimple_switch_edge (cfun, swtch, 1);
123   else
124     e_first = e_default;
125 
126   /* See if there is one common successor block for all branch
127      targets.  If it exists, record it in FINAL_BB.
128      As the initial guess, use the destination of the first non-default
129      case if the range is contiguous and of the default case otherwise,
130      or that block's successor if it is a forwarder block.  */
131   if (! single_pred_p (e_first->dest))
132     m_final_bb = e_first->dest;
133   else if (single_succ_p (e_first->dest)
134 	   && ! single_pred_p (single_succ (e_first->dest)))
135     m_final_bb = single_succ (e_first->dest);
136   /* Require that all switch destinations are either that common
137      FINAL_BB or a forwarder to it, except for the default
138      case if contiguous range.  */
139   if (m_final_bb)
140     FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
141       {
142 	if (e->dest == m_final_bb)
143 	  continue;
144 
145 	if (single_pred_p (e->dest)
146 	    && single_succ_p (e->dest)
147 	    && single_succ (e->dest) == m_final_bb)
148 	  continue;
149 
150 	if (e == e_default && m_contiguous_range)
151 	  {
152 	    m_default_case_nonstandard = true;
153 	    continue;
154 	  }
155 
156 	m_final_bb = NULL;
157 	break;
158       }
159 
160   m_range_size
161     = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
162 
163   /* Get a count of the number of case labels.  Single-valued case labels
164      simply count as one, but a case range counts double, since it may
165      require two compares if it gets lowered as a branching tree.  */
166   m_count = 0;
167   for (i = 1; i < branch_num; i++)
168     {
169       tree elt = gimple_switch_label (swtch, i);
170       m_count++;
171       if (CASE_HIGH (elt)
172 	  && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
173 	m_count++;
174     }
175 
176   /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
177      block.  Assuming a CFG cleanup has already removed degenerate switch
178      statements, we can simply use EDGE_COUNT.  */
179   m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
180 }
181 
182 /* Checks whether the range given by the individual case statements of the
183    switch statement isn't too big and whether the number of branches actually
184    justifies the size of the new array.  */
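/* For example, with a range-branch ratio of 8 (the actual value comes from
   the SWITCH_CONVERSION_BRANCH_RATIO parameter used below), a switch whose
   case labels count as 4 branches is accepted only if the difference between
   its largest and smallest case value is at most 32; anything sparser is
   rejected here.  */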
185 
186 bool
187 switch_conversion::check_range ()
188 {
189   gcc_assert (m_range_size);
190   if (!tree_fits_uhwi_p (m_range_size))
191     {
192       m_reason = "index range way too large or otherwise unusable";
193       return false;
194     }
195 
196   if (tree_to_uhwi (m_range_size)
197       > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
198     {
199       m_reason = "the maximum range-branch ratio exceeded";
200       return false;
201     }
202 
203   return true;
204 }
205 
206 /* Checks whether all basic blocks other than the final BB are empty.  */
207 
208 bool
209 switch_conversion::check_all_empty_except_final ()
210 {
211   edge e, e_default = find_edge (m_switch_bb, m_default_bb);
212   edge_iterator ei;
213 
214   FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
215     {
216       if (e->dest == m_final_bb)
217 	continue;
218 
219       if (!empty_block_p (e->dest))
220 	{
221 	  if (m_contiguous_range && e == e_default)
222 	    {
223 	      m_default_case_nonstandard = true;
224 	      continue;
225 	    }
226 
227 	  m_reason = "bad case - a non-final BB not empty";
228 	  return false;
229 	}
230     }
231 
232   return true;
233 }
234 
235 /* This function checks whether all required values in phi nodes in final_bb
236    are constants.  Required values are those that correspond to a basic block
237    which is a part of the examined switch statement.  It returns true if the
238    phi nodes are OK, otherwise false.  */
239 
240 bool
241 switch_conversion::check_final_bb ()
242 {
243   gphi_iterator gsi;
244 
245   m_phi_count = 0;
246   for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
247     {
248       gphi *phi = gsi.phi ();
249       unsigned int i;
250 
251       if (virtual_operand_p (gimple_phi_result (phi)))
252 	continue;
253 
254       m_phi_count++;
255 
256       for (i = 0; i < gimple_phi_num_args (phi); i++)
257 	{
258 	  basic_block bb = gimple_phi_arg_edge (phi, i)->src;
259 
260 	  if (bb == m_switch_bb
261 	      || (single_pred_p (bb)
262 		  && single_pred (bb) == m_switch_bb
263 		  && (!m_default_case_nonstandard
264 		      || empty_block_p (bb))))
265 	    {
266 	      tree reloc, val;
267 	      const char *reason = NULL;
268 
269 	      val = gimple_phi_arg_def (phi, i);
270 	      if (!is_gimple_ip_invariant (val))
271 		reason = "non-invariant value from a case";
272 	      else
273 		{
274 		  reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
275 		  if ((flag_pic && reloc != null_pointer_node)
276 		      || (!flag_pic && reloc == NULL_TREE))
277 		    {
278 		      if (reloc)
279 			reason
280 			  = "value from a case would need runtime relocations";
281 		      else
282 			reason
283 			  = "value from a case is not a valid initializer";
284 		    }
285 		}
286 	      if (reason)
287 		{
288 		  /* For contiguous range, we can allow non-constant
289 		     or one that needs relocation, as long as it is
290 		     only reachable from the default case.  */
291 		  if (bb == m_switch_bb)
292 		    bb = m_final_bb;
293 		  if (!m_contiguous_range || bb != m_default_bb)
294 		    {
295 		      m_reason = reason;
296 		      return false;
297 		    }
298 
299 		  unsigned int branch_num = gimple_switch_num_labels (m_switch);
300 		  for (unsigned int i = 1; i < branch_num; i++)
301 		    {
302 		      if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
303 			{
304 			  m_reason = reason;
305 			  return false;
306 			}
307 		    }
308 		  m_default_case_nonstandard = true;
309 		}
310 	    }
311 	}
312     }
313 
314   return true;
315 }
316 
317 /* The following function allocates default_values, target_{in,out}_names and
318    constructors arrays.  The last one is also populated with pointers to
319    vectors that will become constructors of new arrays.  */
320 
321 void
322 switch_conversion::create_temp_arrays ()
323 {
324   int i;
325 
326   m_default_values = XCNEWVEC (tree, m_phi_count * 3);
327   /* ??? Macros do not support multi argument templates in their
328      argument list.  We create a typedef to work around that problem.  */
329   typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
330   m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
331   m_target_inbound_names = m_default_values + m_phi_count;
332   m_target_outbound_names = m_target_inbound_names + m_phi_count;
333   for (i = 0; i < m_phi_count; i++)
334     vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
335 }
336 
337 /* Populate the array of default values in the order of phi nodes.
338    DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
339    if the range is non-contiguous or the default case has standard
340    structure, otherwise it is the first non-default case instead.  */
341 
342 void
343 switch_conversion::gather_default_values (tree default_case)
344 {
345   gphi_iterator gsi;
346   basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
347   edge e;
348   int i = 0;
349 
350   gcc_assert (CASE_LOW (default_case) == NULL_TREE
351 	      || m_default_case_nonstandard);
352 
353   if (bb == m_final_bb)
354     e = find_edge (m_switch_bb, bb);
355   else
356     e = single_succ_edge (bb);
357 
358   for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
359     {
360       gphi *phi = gsi.phi ();
361       if (virtual_operand_p (gimple_phi_result (phi)))
362 	continue;
363       tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
364       gcc_assert (val);
365       m_default_values[i++] = val;
366     }
367 }
368 
369 /* The following function populates the vectors in the constructors array with
370    future contents of the static arrays.  The vectors are populated in the
371    order of phi nodes.  */
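/* For example (an illustrative sketch): with case values 3, 5 and 7 yielding
   10, 20 and 30 for some phi, the constructor vector for that phi is built as
   { 10, D, 20, D, 30 }, where D is the phi's default value, so the holes at
   case values 4 and 6 still produce the default result.  */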
372 
373 void
374 switch_conversion::build_constructors ()
375 {
376   unsigned i, branch_num = gimple_switch_num_labels (m_switch);
377   tree pos = m_range_min;
378   tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
379 
380   for (i = 1; i < branch_num; i++)
381     {
382       tree cs = gimple_switch_label (m_switch, i);
383       basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
384       edge e;
385       tree high;
386       gphi_iterator gsi;
387       int j;
388 
389       if (bb == m_final_bb)
390 	e = find_edge (m_switch_bb, bb);
391       else
392 	e = single_succ_edge (bb);
393       gcc_assert (e);
394 
395       while (tree_int_cst_lt (pos, CASE_LOW (cs)))
396 	{
397 	  int k;
398 	  for (k = 0; k < m_phi_count; k++)
399 	    {
400 	      constructor_elt elt;
401 
402 	      elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
403 	      elt.value
404 		= unshare_expr_without_location (m_default_values[k]);
405 	      m_constructors[k]->quick_push (elt);
406 	    }
407 
408 	  pos = int_const_binop (PLUS_EXPR, pos, pos_one);
409 	}
410       gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
411 
412       j = 0;
413       if (CASE_HIGH (cs))
414 	high = CASE_HIGH (cs);
415       else
416 	high = CASE_LOW (cs);
417       for (gsi = gsi_start_phis (m_final_bb);
418 	   !gsi_end_p (gsi); gsi_next (&gsi))
419 	{
420 	  gphi *phi = gsi.phi ();
421 	  if (virtual_operand_p (gimple_phi_result (phi)))
422 	    continue;
423 	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
424 	  tree low = CASE_LOW (cs);
425 	  pos = CASE_LOW (cs);
426 
427 	  do
428 	    {
429 	      constructor_elt elt;
430 
431 	      elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
432 	      elt.value = unshare_expr_without_location (val);
433 	      m_constructors[j]->quick_push (elt);
434 
435 	      pos = int_const_binop (PLUS_EXPR, pos, pos_one);
436 	    } while (!tree_int_cst_lt (high, pos)
437 		     && tree_int_cst_lt (low, pos));
438 	  j++;
439 	}
440     }
441 }
442 
443 /* If all values in the constructor vector are values of a linear function
444    a * x + b, then return true.  When true, COEFF_A and COEFF_B are the
445    coefficients of the linear function.  Note that equal values are a special
446    case of a linear function with a equal to zero.  */
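/* For example, constructor values { 10, 12, 14, 16 } for the contiguous case
   values 3 .. 6 satisfy a = 2 and b = 4, since 2 * 3 + 4 == 10,
   2 * 4 + 4 == 12, and so on; build_one_array can then emit the computation
   a * x + b instead of an array load.  */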
447 
448 bool
449 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
450 					       wide_int *coeff_a,
451 					       wide_int *coeff_b)
452 {
453   unsigned int i;
454   constructor_elt *elt;
455 
456   gcc_assert (vec->length () >= 2);
457 
458   /* Let's try to find a linear function a * x + b that can apply to the
459      given values. 'a' can be calculated as follows:
460 
461      a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
462      a = y2 - y1
463 
464      and
465 
466      b = y2 - a * x2
467 
468   */
469 
470   tree elt0 = (*vec)[0].value;
471   tree elt1 = (*vec)[1].value;
472 
473   if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
474     return false;
475 
476   wide_int range_min
477     = wide_int::from (wi::to_wide (m_range_min),
478 		      TYPE_PRECISION (TREE_TYPE (elt0)),
479 		      TYPE_SIGN (TREE_TYPE (m_range_min)));
480   wide_int y1 = wi::to_wide (elt0);
481   wide_int y2 = wi::to_wide (elt1);
482   wide_int a = y2 - y1;
483   wide_int b = y2 - a * (range_min + 1);
484 
485   /* Verify that all values fulfill the linear function.  */
486   FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
487     {
488       if (TREE_CODE (elt->value) != INTEGER_CST)
489 	return false;
490 
491       wide_int value = wi::to_wide (elt->value);
492       if (a * range_min + b != value)
493 	return false;
494 
495       ++range_min;
496     }
497 
498   *coeff_a = a;
499   *coeff_b = b;
500 
501   return true;
502 }
503 
504 /* Return type which should be used for array elements, either TYPE's
505    main variant or, for integral types, some smaller integral type
506    that can still hold all the constants.  */
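/* For instance, if the PHI result is a 64-bit integer but every constant in
   the constructor fits in [-128, 127], the CSWTCH array can be emitted with a
   "signed char" element type and the loaded value converted back to the wider
   type after the ARRAY_REF (see build_one_array), shrinking the table
   roughly eightfold.  */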
507 
508 tree
509 switch_conversion::array_value_type (tree type, int num)
510 {
511   unsigned int i, len = vec_safe_length (m_constructors[num]);
512   constructor_elt *elt;
513   int sign = 0;
514   tree smaller_type;
515 
516   /* Types with alignments greater than their size can reach here, e.g. out of
517      SRA.  We couldn't use these as an array component type so get back to the
518      main variant first, which, for our purposes, is fine for other types as
519      well.  */
520 
521   type = TYPE_MAIN_VARIANT (type);
522 
523   if (!INTEGRAL_TYPE_P (type))
524     return type;
525 
526   scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
527   scalar_int_mode mode = get_narrowest_mode (type_mode);
528   if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
529     return type;
530 
531   if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
532     return type;
533 
534   FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
535     {
536       wide_int cst;
537 
538       if (TREE_CODE (elt->value) != INTEGER_CST)
539 	return type;
540 
541       cst = wi::to_wide (elt->value);
542       while (1)
543 	{
544 	  unsigned int prec = GET_MODE_BITSIZE (mode);
545 	  if (prec > HOST_BITS_PER_WIDE_INT)
546 	    return type;
547 
548 	  if (sign >= 0 && cst == wi::zext (cst, prec))
549 	    {
550 	      if (sign == 0 && cst == wi::sext (cst, prec))
551 		break;
552 	      sign = 1;
553 	      break;
554 	    }
555 	  if (sign <= 0 && cst == wi::sext (cst, prec))
556 	    {
557 	      sign = -1;
558 	      break;
559 	    }
560 
561 	  if (sign == 1)
562 	    sign = 0;
563 
564 	  if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
565 	      || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
566 	    return type;
567 	}
568     }
569 
570   if (sign == 0)
571     sign = TYPE_UNSIGNED (type) ? 1 : -1;
572   smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
573   if (GET_MODE_SIZE (type_mode)
574       <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
575     return type;
576 
577   return smaller_type;
578 }
579 
580 /* Create an appropriate array type and declaration and assemble a static
581    array variable.  Also create a load statement that initializes
582    the variable in question with a value from the static array.  NUM is the
583    index into the arrays of constructors, default values and target SSA names
584    for this particular array.
585    ARR_INDEX_TYPE is the type of the index
586    of the new array, PHI is the phi node of the final BB that corresponds
587    to the value that will be loaded from the created array.  TIDX
588    is an ssa name of a temporary variable holding the index for loads from the
589    new array.  */
590 
591 void
592 switch_conversion::build_one_array (int num, tree arr_index_type,
593 				    gphi *phi, tree tidx)
594 {
595   tree name;
596   gimple *load;
597   gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
598   location_t loc = gimple_location (m_switch);
599 
600   gcc_assert (m_default_values[num]);
601 
602   name = copy_ssa_name (PHI_RESULT (phi));
603   m_target_inbound_names[num] = name;
604 
605   vec<constructor_elt, va_gc> *constructor = m_constructors[num];
606   wide_int coeff_a, coeff_b;
607   bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
608   tree type;
609   if (linear_p
610       && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
611     {
612       if (dump_file && coeff_a.to_uhwi () > 0)
613 	fprintf (dump_file, "Linear transformation with A = %" PRId64
614 		 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
615 		 coeff_b.to_shwi ());
616 
617       /* We must use type of constructor values.  */
618       gimple_seq seq = NULL;
619       tree tmp = gimple_convert (&seq, type, m_index_expr);
620       tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
621 				wide_int_to_tree (type, coeff_a), tmp);
622       tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
623 				wide_int_to_tree (type, coeff_b));
624       tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
625       gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
626       load = gimple_build_assign (name, tmp4);
627     }
628   else
629     {
630       tree array_type, ctor, decl, value_type, fetch, default_type;
631 
632       default_type = TREE_TYPE (m_default_values[num]);
633       value_type = array_value_type (default_type, num);
634       array_type = build_array_type (value_type, arr_index_type);
635       if (default_type != value_type)
636 	{
637 	  unsigned int i;
638 	  constructor_elt *elt;
639 
640 	  FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
641 	    elt->value = fold_convert (value_type, elt->value);
642 	}
643       ctor = build_constructor (array_type, constructor);
644       TREE_CONSTANT (ctor) = true;
645       TREE_STATIC (ctor) = true;
646 
647       decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
648       TREE_STATIC (decl) = 1;
649       DECL_INITIAL (decl) = ctor;
650 
651       DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
652       DECL_ARTIFICIAL (decl) = 1;
653       DECL_IGNORED_P (decl) = 1;
654       TREE_CONSTANT (decl) = 1;
655       TREE_READONLY (decl) = 1;
656       DECL_IGNORED_P (decl) = 1;
657       if (offloading_function_p (cfun->decl))
658 	DECL_ATTRIBUTES (decl)
659 	  = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
660 		       NULL_TREE);
661       varpool_node::finalize_decl (decl);
662 
663       fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
664 		      NULL_TREE);
665       if (default_type != value_type)
666 	{
667 	  fetch = fold_convert (default_type, fetch);
668 	  fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
669 					    true, GSI_SAME_STMT);
670 	}
671       load = gimple_build_assign (name, fetch);
672     }
673 
674   gsi_insert_before (&gsi, load, GSI_SAME_STMT);
675   update_stmt (load);
676   m_arr_ref_last = load;
677 }
678 
679 /* Builds static arrays initialized with values gathered from
680    the switch statement.  Also creates statements that load values from
681    them.  */
682 
683 void
684 switch_conversion::build_arrays ()
685 {
686   tree arr_index_type;
687   tree tidx, sub, utype;
688   gimple *stmt;
689   gimple_stmt_iterator gsi;
690   gphi_iterator gpi;
691   int i;
692   location_t loc = gimple_location (m_switch);
693 
694   gsi = gsi_for_stmt (m_switch);
695 
696   /* Make sure we do not generate arithmetic in a subrange.  */
697   utype = TREE_TYPE (m_index_expr);
698   if (TREE_TYPE (utype))
699     utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
700   else
701     utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
702 
703   arr_index_type = build_index_type (m_range_size);
704   tidx = make_ssa_name (utype);
705   sub = fold_build2_loc (loc, MINUS_EXPR, utype,
706 			 fold_convert_loc (loc, utype, m_index_expr),
707 			 fold_convert_loc (loc, utype, m_range_min));
708   sub = force_gimple_operand_gsi (&gsi, sub,
709 				  false, NULL, true, GSI_SAME_STMT);
710   stmt = gimple_build_assign (tidx, sub);
711 
712   gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
713   update_stmt (stmt);
714   m_arr_ref_first = stmt;
715 
716   for (gpi = gsi_start_phis (m_final_bb), i = 0;
717        !gsi_end_p (gpi); gsi_next (&gpi))
718     {
719       gphi *phi = gpi.phi ();
720       if (!virtual_operand_p (gimple_phi_result (phi)))
721 	build_one_array (i++, arr_index_type, phi, tidx);
722       else
723 	{
724 	  edge e;
725 	  edge_iterator ei;
726 	  FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
727 	    {
728 	      if (e->dest == m_final_bb)
729 		break;
730 	      if (!m_default_case_nonstandard
731 		  || e->dest != m_default_bb)
732 		{
733 		  e = single_succ_edge (e->dest);
734 		  break;
735 		}
736 	    }
737 	  gcc_assert (e && e->dest == m_final_bb);
738 	  m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
739 	}
740     }
741 }
742 
743 /* Generates and appropriately inserts loads of default values at the position
744    given by GSI.  Returns the last inserted statement.  */
745 
746 gassign *
747 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
748 {
749   int i;
750   gassign *assign = NULL;
751 
752   for (i = 0; i < m_phi_count; i++)
753     {
754       tree name = copy_ssa_name (m_target_inbound_names[i]);
755       m_target_outbound_names[i] = name;
756       assign = gimple_build_assign (name, m_default_values[i]);
757       gsi_insert_before (gsi, assign, GSI_SAME_STMT);
758       update_stmt (assign);
759     }
760   return assign;
761 }
762 
763 /* Deletes the unused bbs and edges that now contain the switch statement and
764    its empty branch bbs.  BBD is the now dead BB containing
765    the original switch statement, FINAL is the last BB of the converted
766    switch statement (in terms of succession).  */
767 
768 void
769 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
770 			      basic_block default_bb)
771 {
772   edge_iterator ei;
773   edge e;
774 
775   for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
776     {
777       basic_block bb;
778       bb = e->dest;
779       remove_edge (e);
780       if (bb != final && bb != default_bb)
781 	delete_basic_block (bb);
782     }
783   delete_basic_block (bbd);
784 }
785 
786 /* Add values to phi nodes in final_bb for the two new edges.  E1F is the edge
787    from the basic block loading values from an array and E2F from the basic
788    block loading default values.  BBF is the last switch basic block (see the
789    bbf description in the comment below).  */
790 
791 void
792 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
793 {
794   gphi_iterator gsi;
795   int i;
796 
797   for (gsi = gsi_start_phis (bbf), i = 0;
798        !gsi_end_p (gsi); gsi_next (&gsi))
799     {
800       gphi *phi = gsi.phi ();
801       tree inbound, outbound;
802       if (virtual_operand_p (gimple_phi_result (phi)))
803 	inbound = outbound = m_target_vop;
804       else
805 	{
806 	  inbound = m_target_inbound_names[i];
807 	  outbound = m_target_outbound_names[i++];
808 	}
809       add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
810       if (!m_default_case_nonstandard)
811 	add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
812     }
813 }
814 
815 /* Creates a check whether the switch expression value actually falls into the
816    range given by all the cases.  If it does not, the temporaries are loaded
817    with default values instead.  */
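/* Roughly, the resulting CFG has this shape (a sketch; block and SSA names
   are illustrative only):

     bb0:  tidx = (unsigned) index - range_min;
           if (tidx <= range_size) goto bb1; else goto bb2;
     bb2:  d_1 = default value 1; ...; d_N = default value N;
     bb1:  v_1 = CSWTCH1[tidx]; ...; v_N = CSWTCHN[tidx];
     bbF:  x_i = PHI <v_i (bb1), d_i (bb2)> for each non-virtual phi.

   When the default case is non-standard, bb2 is the original default block
   and the default-value loads are not emitted.  */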
818 
819 void
820 switch_conversion::gen_inbound_check ()
821 {
822   tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
823   tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
824   tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
825   glabel *label1, *label2, *label3;
826   tree utype, tidx;
827   tree bound;
828 
829   gcond *cond_stmt;
830 
831   gassign *last_assign = NULL;
832   gimple_stmt_iterator gsi;
833   basic_block bb0, bb1, bb2, bbf, bbd;
834   edge e01 = NULL, e02, e21, e1d, e1f, e2f;
835   location_t loc = gimple_location (m_switch);
836 
837   gcc_assert (m_default_values);
838 
839   bb0 = gimple_bb (m_switch);
840 
841   tidx = gimple_assign_lhs (m_arr_ref_first);
842   utype = TREE_TYPE (tidx);
843 
844   /* (end of) block 0 */
845   gsi = gsi_for_stmt (m_arr_ref_first);
846   gsi_next (&gsi);
847 
848   bound = fold_convert_loc (loc, utype, m_range_size);
849   cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
850   gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
851   update_stmt (cond_stmt);
852 
853   /* block 2 */
854   if (!m_default_case_nonstandard)
855     {
856       label2 = gimple_build_label (label_decl2);
857       gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
858       last_assign = gen_def_assigns (&gsi);
859     }
860 
861   /* block 1 */
862   label1 = gimple_build_label (label_decl1);
863   gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
864 
865   /* block F */
866   gsi = gsi_start_bb (m_final_bb);
867   label3 = gimple_build_label (label_decl3);
868   gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
869 
870   /* cfg fix */
871   e02 = split_block (bb0, cond_stmt);
872   bb2 = e02->dest;
873 
874   if (m_default_case_nonstandard)
875     {
876       bb1 = bb2;
877       bb2 = m_default_bb;
878       e01 = e02;
879       e01->flags = EDGE_TRUE_VALUE;
880       e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
881       edge e_default = find_edge (bb1, bb2);
882       for (gphi_iterator gsi = gsi_start_phis (bb2);
883 	   !gsi_end_p (gsi); gsi_next (&gsi))
884 	{
885 	  gphi *phi = gsi.phi ();
886 	  tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
887 	  add_phi_arg (phi, arg, e02,
888 		       gimple_phi_arg_location_from_edge (phi, e_default));
889 	}
890       /* Partially fix the dominator tree, if it is available.  */
891       if (dom_info_available_p (CDI_DOMINATORS))
892 	redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
893     }
894   else
895     {
896       e21 = split_block (bb2, last_assign);
897       bb1 = e21->dest;
898       remove_edge (e21);
899     }
900 
901   e1d = split_block (bb1, m_arr_ref_last);
902   bbd = e1d->dest;
903   remove_edge (e1d);
904 
905   /* Flags and profiles of the edge for in-range values.  */
906   if (!m_default_case_nonstandard)
907     e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
908   e01->probability = m_default_prob.invert ();
909 
910   /* Flags and profiles of the edge taking care of out-of-range values.  */
911   e02->flags &= ~EDGE_FALLTHRU;
912   e02->flags |= EDGE_FALSE_VALUE;
913   e02->probability = m_default_prob;
914 
915   bbf = m_final_bb;
916 
917   e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
918   e1f->probability = profile_probability::always ();
919 
920   if (m_default_case_nonstandard)
921     e2f = NULL;
922   else
923     {
924       e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
925       e2f->probability = profile_probability::always ();
926     }
927 
928   /* frequencies of the new BBs */
929   bb1->count = e01->count ();
930   bb2->count = e02->count ();
931   if (!m_default_case_nonstandard)
932     bbf->count = e1f->count () + e2f->count ();
933 
934   /* Tidy blocks that have become unreachable.  */
935   prune_bbs (bbd, m_final_bb,
936 	     m_default_case_nonstandard ? m_default_bb : NULL);
937 
938   /* Fixup the PHI nodes in bbF.  */
939   fix_phi_nodes (e1f, e2f, bbf);
940 
941   /* Fix the dominator tree, if it is available.  */
942   if (dom_info_available_p (CDI_DOMINATORS))
943     {
944       vec<basic_block> bbs_to_fix_dom;
945 
946       set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
947       if (!m_default_case_nonstandard)
948 	set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
949       if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
950 	/* If bbD was the immediate dominator ...  */
951 	set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
952 
953       bbs_to_fix_dom.create (3 + (bb2 != bbf));
954       bbs_to_fix_dom.quick_push (bb0);
955       bbs_to_fix_dom.quick_push (bb1);
956       if (bb2 != bbf)
957 	bbs_to_fix_dom.quick_push (bb2);
958       bbs_to_fix_dom.quick_push (bbf);
959 
960       iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
961       bbs_to_fix_dom.release ();
962     }
963 }
964 
965 /* The following function is invoked on every switch statement (the current
966    one is given in SWTCH) and runs the individual phases of switch
967    conversion on it one after another until one fails or the conversion
968    is completed.  On success, m_reason is NULL; otherwise it points
969    to a string with the reason why the conversion failed.  */
970 
971 void
972 switch_conversion::expand (gswitch *swtch)
973 {
974   /* Group case labels so that we get the right results from the heuristics
975      that decide on the code generation approach for this switch.  */
976   m_cfg_altered |= group_case_labels_stmt (swtch);
977 
978   /* If this switch is now a degenerate case with only a default label,
979      there is nothing left for us to do.  */
980   if (gimple_switch_num_labels (swtch) < 2)
981     {
982       m_reason = "switch is a degenerate case";
983       return;
984     }
985 
986   collect (swtch);
987 
988   /* No error markers should reach here (they should be filtered out
989      during gimplification).  */
990   gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
991 
992   /* A switch on a constant should have been optimized in tree-cfg-cleanup.  */
993   gcc_checking_assert (!TREE_CONSTANT (m_index_expr));
994 
995   /* Prefer bit test if possible.  */
996   if (tree_fits_uhwi_p (m_range_size)
997       && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
998       && bit_test_cluster::is_beneficial (m_count, m_uniq))
999     {
1000       m_reason = "expanding as bit test is preferable";
1001       return;
1002     }
1003 
1004   if (m_uniq <= 2)
1005     {
1006       /* This will be expanded as a decision tree.  */
1007       m_reason = "expanding as jumps is preferable";
1008       return;
1009     }
1010 
1011   /* If there is no common successor, we cannot do the transformation.  */
1012   if (!m_final_bb)
1013     {
1014       m_reason = "no common successor to all case label target blocks found";
1015       return;
1016     }
1017 
1018   /* Check the case label values are within reasonable range:  */
1019   if (!check_range ())
1020     {
1021       gcc_assert (m_reason);
1022       return;
1023     }
1024 
1025   /* For all the cases, see whether their blocks are empty, whether the values
1026      they yield are constant, and so on.  */
1027   if (!check_all_empty_except_final ())
1028     {
1029       gcc_assert (m_reason);
1030       return;
1031     }
1032   if (!check_final_bb ())
1033     {
1034       gcc_assert (m_reason);
1035       return;
1036     }
1037 
1038   /* At this point all checks have passed and we can proceed with the
1039      transformation.  */
1040 
1041   create_temp_arrays ();
1042   gather_default_values (m_default_case_nonstandard
1043 			 ? gimple_switch_label (swtch, 1)
1044 			 : gimple_switch_default_label (swtch));
1045   build_constructors ();
1046 
1047   build_arrays (); /* Build the static arrays and assignments.  */
1048   gen_inbound_check ();	/* Build the bounds check.  */
1049 
1050   m_cfg_altered = true;
1051 }
1052 
1053 /* Destructor.  */
1054 
1055 switch_conversion::~switch_conversion ()
1056 {
1057   XDELETEVEC (m_constructors);
1058   XDELETEVEC (m_default_values);
1059 }
1060 
1061 /* Constructor.  */
1062 
1063 group_cluster::group_cluster (vec<cluster *> &clusters,
1064 			      unsigned start, unsigned end)
1065 {
1066   gcc_checking_assert (end - start + 1 >= 1);
1067   m_prob = profile_probability::never ();
1068   m_cases.create (end - start + 1);
1069   for (unsigned i = start; i <= end; i++)
1070     {
1071       m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1072       m_prob += clusters[i]->m_prob;
1073     }
1074   m_subtree_prob = m_prob;
1075 }
1076 
1077 /* Destructor.  */
1078 
1079 group_cluster::~group_cluster ()
1080 {
1081   for (unsigned i = 0; i < m_cases.length (); i++)
1082     delete m_cases[i];
1083 
1084   m_cases.release ();
1085 }
1086 
1087 /* Dump content of a cluster.  */
1088 
1089 void
1090 group_cluster::dump (FILE *f, bool details)
1091 {
1092   unsigned total_values = 0;
1093   for (unsigned i = 0; i < m_cases.length (); i++)
1094     total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1095 					   m_cases[i]->get_high ());
1096 
1097   unsigned comparison_count = 0;
1098   for (unsigned i = 0; i < m_cases.length (); i++)
1099     {
1100       simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1101       comparison_count += sc->m_range_p ? 2 : 1;
1102     }
1103 
1104   unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1105   fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1106 
1107   if (details)
1108     fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1109 	     " density: %.2f%%)", total_values, comparison_count, range,
1110 	     100.0f * comparison_count / range);
1111 
1112   fprintf (f, ":");
1113   PRINT_CASE (f, get_low ());
1114   fprintf (f, "-");
1115   PRINT_CASE (f, get_high ());
1116   fprintf (f, " ");
1117 }
1118 
1119 /* Emit GIMPLE code to handle the cluster.  */
1120 
1121 void
1122 jump_table_cluster::emit (tree index_expr, tree,
1123 			  tree default_label_expr, basic_block default_bb)
1124 {
1125   unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1126   unsigned HOST_WIDE_INT nondefault_range = 0;
1127 
1128   /* For a jump table we just emit a new gswitch statement that will
1129      be later lowered to a jump table.  */
1130   auto_vec <tree> labels;
1131   labels.create (m_cases.length ());
1132 
1133   make_edge (m_case_bb, default_bb, 0);
1134   for (unsigned i = 0; i < m_cases.length (); i++)
1135     {
1136       labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1137       make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1138     }
1139 
1140   gswitch *s = gimple_build_switch (index_expr,
1141 				    unshare_expr (default_label_expr), labels);
1142   gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1143   gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1144 
1145   /* Set up even probabilities for all cases.  */
1146   for (unsigned i = 0; i < m_cases.length (); i++)
1147     {
1148       simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1149       edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1150       unsigned HOST_WIDE_INT case_range
1151 	= sc->get_range (sc->get_low (), sc->get_high ());
1152       nondefault_range += case_range;
1153 
1154       /* case_edge->aux is number of values in a jump-table that are covered
1155 	 by the case_edge.  */
1156       case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1157     }
1158 
1159   edge default_edge = gimple_switch_default_edge (cfun, s);
1160   default_edge->probability = profile_probability::never ();
1161 
1162   for (unsigned i = 0; i < m_cases.length (); i++)
1163     {
1164       simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1165       edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1166       case_edge->probability
1167 	= profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1168 						      range);
1169     }
1170 
1171   /* The default edge gets the probability not covered by the case values.  */
1172   default_edge->probability
1173     += profile_probability::always ().apply_scale (nondefault_range,
1174 						   range).invert ();
1175 
1176   switch_decision_tree::reset_out_edges_aux (s);
1177 }
1178 
1179 /* Find jump tables of given CLUSTERS, where all members of the vector
1180    are of type simple_cluster.  New clusters are returned.  */
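/* A descriptive summary of the dynamic programming below (not a
   specification): min[i] holds the smallest number of clusters the first i
   simple clusters can be partitioned into, together with the start of the
   last partition; a candidate partition [j, i-1] is considered only if
   can_be_handled says a jump table for it would be dense enough, and ties are
   broken in favour of fewer cases left outside jump tables.  The output
   vector is then rebuilt backwards from min[l].  */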
1181 
1182 vec<cluster *>
1183 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1184 {
1185   if (!is_enabled ())
1186     return clusters.copy ();
1187 
1188   unsigned l = clusters.length ();
1189   auto_vec<min_cluster_item> min;
1190   min.reserve (l + 1);
1191 
1192   min.quick_push (min_cluster_item (0, 0, 0));
1193 
1194   for (unsigned i = 1; i <= l; i++)
1195     {
1196       /* Set minimal # of clusters with i-th item to infinite.  */
1197       min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1198 
1199       for (unsigned j = 0; j < i; j++)
1200 	{
1201 	  unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1202 	  if (i - j < case_values_threshold ())
1203 	    s += i - j;
1204 
1205 	  /* Prefer fewer clusters; on a tie, fewer cases outside jump tables.  */
1206 	  if ((min[j].m_count + 1 < min[i].m_count
1207 	       || (min[j].m_count + 1 == min[i].m_count
1208 		   && s < min[i].m_non_jt_cases))
1209 	      && can_be_handled (clusters, j, i - 1))
1210 	    min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1211 	}
1212 
1213       gcc_checking_assert (min[i].m_count != INT_MAX);
1214     }
1215 
1216   /* No result.  */
1217   if (min[l].m_count == INT_MAX)
1218     return clusters.copy ();
1219 
1220   vec<cluster *> output;
1221   output.create (4);
1222 
1223   /* Find and build the clusters.  */
1224   for (int end = l;;)
1225     {
1226       int start = min[end].m_start;
1227 
1228       /* Do not allow clusters with small number of cases.  */
1229       if (is_beneficial (clusters, start, end - 1))
1230 	output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1231       else
1232 	for (int i = end - 1; i >= start; i--)
1233 	  output.safe_push (clusters[i]);
1234 
1235       end = start;
1236 
1237       if (start <= 0)
1238 	break;
1239     }
1240 
1241   output.reverse ();
1242   return output;
1243 }
1244 
1245 /* Return true when cluster starting at START and ending at END (inclusive)
1246    can build a jump-table.  */
1247 
1248 bool
1249 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1250 				    unsigned start, unsigned end)
1251 {
1252   /* If the switch is relatively small such that the cost of one
1253      indirect jump on the target is higher than the cost of a
1254      decision tree, go with the decision tree.
1255 
1256      If range of values is much bigger than number of values,
1257      or if it is too large to represent in a HOST_WIDE_INT,
1258      make a sequence of conditional branches instead of a dispatch.
1259 
1260      The definition of "much bigger" depends on whether we are
1261      optimizing for size or for speed.  */
1262   if (!flag_jump_tables)
1263     return false;
1264 
1265   /* For algorithm correctness, jump table for a single case must return
1266      true.  We bail out in is_beneficial if it's called just for
1267      a single case.  */
1268   if (start == end)
1269     return true;
1270 
1271   unsigned HOST_WIDE_INT max_ratio
1272     = optimize_insn_for_size_p () ? max_ratio_for_size : max_ratio_for_speed;
1273   unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1274 					    clusters[end]->get_high ());
1275   /* Check overflow.  */
1276   if (range == 0)
1277     return false;
1278 
1279   unsigned HOST_WIDE_INT comparison_count = 0;
1280   for (unsigned i = start; i <= end; i++)
1281     {
1282       simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1283       comparison_count += sc->m_range_p ? 2 : 1;
1284     }
1285 
1286   return range <= max_ratio * comparison_count;
1287 }
1288 
1289 /* Return true if the cluster starting at START and ending at END (inclusive)
1290    is a profitable transformation.  */
1291 
1292 bool
1293 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1294 				   unsigned start, unsigned end)
1295 {
1296   /* Single case bail out.  */
1297   if (start == end)
1298     return false;
1299 
1300   return end - start + 1 >= case_values_threshold ();
1301 }
1302 
1303 /* Definition of jump_table_cluster constants.  */
1304 
1305 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_size;
1306 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_speed;
1307 
1308 /* Find bit tests of given CLUSTERS, where all members of the vector
1309    are of type simple_cluster.  New clusters are returned.  */
1310 
1311 vec<cluster *>
1312 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1313 {
1314   vec<cluster *> output;
1315   output.create (4);
1316 
1317   unsigned l = clusters.length ();
1318   auto_vec<min_cluster_item> min;
1319   min.reserve (l + 1);
1320 
1321   min.quick_push (min_cluster_item (0, 0, 0));
1322 
1323   for (unsigned i = 1; i <= l; i++)
1324     {
1325       /* Set minimal # of clusters with i-th item to infinite.  */
1326       min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1327 
1328       for (unsigned j = 0; j < i; j++)
1329 	{
1330 	  if (min[j].m_count + 1 < min[i].m_count
1331 	      && can_be_handled (clusters, j, i - 1))
1332 	    min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1333 	}
1334 
1335       gcc_checking_assert (min[i].m_count != INT_MAX);
1336     }
1337 
1338   /* No result.  */
1339   if (min[l].m_count == INT_MAX)
1340     return clusters.copy ();
1341 
1342   /* Find and build the clusters.  */
1343   for (unsigned end = l;;)
1344     {
1345       int start = min[end].m_start;
1346 
1347       if (is_beneficial (clusters, start, end - 1))
1348 	{
1349 	  bool entire = start == 0 && end == clusters.length ();
1350 	  output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1351 						  entire));
1352 	}
1353       else
1354 	for (int i = end - 1; i >= start; i--)
1355 	  output.safe_push (clusters[i]);
1356 
1357       end = start;
1358 
1359       if (start <= 0)
1360 	break;
1361     }
1362 
1363   output.reverse ();
1364   return output;
1365 }
1366 
1367 /* Return true when RANGE of case values with UNIQ labels
1368    can build a bit test.  */
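/* In other words (a restatement of the checks below): the whole range must
   fit in one machine word so that a single constant mask can describe it, and
   at most three distinct targets are allowed, since each target costs one
   AND-and-branch sequence.  */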
1369 
1370 bool
1371 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1372 				  unsigned int uniq)
1373 {
1374   /* Check overflow.  */
1375   if (range == 0)
1376     return 0;
1377 
1378   if (range >= GET_MODE_BITSIZE (word_mode))
1379     return false;
1380 
1381   return uniq <= 3;
1382 }
1383 
1384 /* Return true when cluster starting at START and ending at END (inclusive)
1385    can build a bit test.  */
1386 
1387 bool
1388 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1389 				  unsigned start, unsigned end)
1390 {
1391   /* For algorithm correctness, bit test for a single case must return
1392      true.  We bail out in is_beneficial if it's called just for
1393      a single case.  */
1394   if (start == end)
1395     return true;
1396 
1397   unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1398 					    clusters[end]->get_high ());
1399   auto_bitmap dest_bbs;
1400 
1401   for (unsigned i = start; i <= end; i++)
1402     {
1403       simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1404       bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1405     }
1406 
1407   return can_be_handled (range, bitmap_count_bits (dest_bbs));
1408 }
1409 
1410 /* Return true when COUNT cases with UNIQ distinct target labels are
1411    beneficial for the bit test transformation.  */
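/* E.g. five case values that branch to one of two distinct targets qualify,
   while four do not; a single target needs at least three case values and
   three targets need at least six.  */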
1412 
1413 bool
1414 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1415 {
1416   return (((uniq == 1 && count >= 3)
1417 	   || (uniq == 2 && count >= 5)
1418 	   || (uniq == 3 && count >= 6)));
1419 }
1420 
1421 /* Return true if the cluster starting at START and ending at END (inclusive)
1422    is a profitable transformation.  */
1423 
1424 bool
1425 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1426 				 unsigned start, unsigned end)
1427 {
1428   /* Single case bail out.  */
1429   if (start == end)
1430     return false;
1431 
1432   auto_bitmap dest_bbs;
1433 
1434   for (unsigned i = start; i <= end; i++)
1435     {
1436       simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1437       bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1438     }
1439 
1440   unsigned uniq = bitmap_count_bits (dest_bbs);
1441   unsigned count = end - start + 1;
1442   return is_beneficial (count, uniq);
1443 }
1444 
1445 /* Comparison function for qsort to order bit tests by decreasing
1446    probability of execution.  */
1447 
1448 int
1449 case_bit_test::cmp (const void *p1, const void *p2)
1450 {
1451   const struct case_bit_test *const d1 = (const struct case_bit_test *) p1;
1452   const struct case_bit_test *const d2 = (const struct case_bit_test *) p2;
1453 
1454   if (d2->bits != d1->bits)
1455     return d2->bits - d1->bits;
1456 
1457   /* Stabilize the sort.  */
1458   return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1459 	  - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1460 }
1461 
1462 /*  Expand a switch statement by a short sequence of bit-wise
1463     comparisons.  "switch(x)" is effectively converted into
1464     "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1465     integer constants.
1466 
1467     INDEX_EXPR is the value being switched on.
1468 
1469     MINVAL is the lowest case value in the case nodes,
1470     and RANGE is highest value minus MINVAL.  MINVAL and RANGE
1471     are not guaranteed to be of the same type as INDEX_EXPR
1472     (the gimplifier doesn't change the type of case label values,
1473     and MINVAL and RANGE are derived from those values).
1474     MAXVAL is MINVAL + RANGE.
1475 
1476     There *MUST* be max_case_bit_tests or fewer unique case
1477     node targets.  */
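/* A small worked example (illustrative only): for case values 1, 3 and 5 all
   branching to the same label, with MINVAL 1 and RANGE 4, the emitted test is
   effectively

     csui = 1 << (x - 1);
     if (csui & 0x15)          (bits 0, 2 and 4 set)
       goto <case label>;

   so a single AND-and-branch replaces three separate comparisons.  */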
1478 
1479 void
1480 bit_test_cluster::emit (tree index_expr, tree index_type,
1481 			tree, basic_block default_bb)
1482 {
1483   struct case_bit_test test[m_max_case_bit_tests] = { {} };
1484   unsigned int i, j, k;
1485   unsigned int count;
1486 
1487   tree unsigned_index_type = range_check_type (index_type);
1488 
1489   gimple_stmt_iterator gsi;
1490   gassign *shift_stmt;
1491 
1492   tree idx, tmp, csui;
1493   tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1494   tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1495   tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1496   int prec = TYPE_PRECISION (word_type_node);
1497   wide_int wone = wi::one (prec);
1498 
1499   tree minval = get_low ();
1500   tree maxval = get_high ();
1501   tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1502   unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1503 
1504   /* Go through all case labels, and collect the case labels, profile
1505      counts, and other information we need to build the branch tests.  */
1506   count = 0;
1507   for (i = 0; i < m_cases.length (); i++)
1508     {
1509       unsigned int lo, hi;
1510       simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1511       for (k = 0; k < count; k++)
1512 	if (n->m_case_bb == test[k].target_bb)
1513 	  break;
1514 
1515       if (k == count)
1516 	{
1517 	  gcc_checking_assert (count < m_max_case_bit_tests);
1518 	  test[k].mask = wi::zero (prec);
1519 	  test[k].target_bb = n->m_case_bb;
1520 	  test[k].label = n->m_case_label_expr;
1521 	  test[k].bits = 0;
1522 	  count++;
1523 	}
1524 
1525       test[k].bits += n->get_range (n->get_low (), n->get_high ());
1526 
1527       lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1528       if (n->get_high () == NULL_TREE)
1529 	hi = lo;
1530       else
1531 	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1532 					    minval));
1533 
1534       for (j = lo; j <= hi; j++)
1535 	test[k].mask |= wi::lshift (wone, j);
1536     }
1537 
1538   qsort (test, count, sizeof (*test), case_bit_test::cmp);
1539 
1540   /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1541      the minval subtractions, but it might make the mask constants more
1542      expensive.  So, compare the costs.  */
1543   if (compare_tree_int (minval, 0) > 0
1544       && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1545     {
1546       int cost_diff;
1547       HOST_WIDE_INT m = tree_to_uhwi (minval);
1548       rtx reg = gen_raw_REG (word_mode, 10000);
1549       bool speed_p = optimize_insn_for_speed_p ();
1550       cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
1551 					      GEN_INT (-m)), speed_p);
1552       for (i = 0; i < count; i++)
1553 	{
1554 	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
1555 	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1556 				     word_mode, speed_p);
1557 	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1558 	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1559 				     word_mode, speed_p);
1560 	}
1561       if (cost_diff > 0)
1562 	{
1563 	  for (i = 0; i < count; i++)
1564 	    test[i].mask = wi::lshift (test[i].mask, m);
1565 	  minval = build_zero_cst (TREE_TYPE (minval));
1566 	  range = maxval;
1567 	}
1568     }
1569 
1570   /* Now build the test-and-branch code.  */
1571 
1572   gsi = gsi_last_bb (m_case_bb);
1573 
1574   /* idx = (unsigned)x - minval.  */
1575   idx = fold_convert (unsigned_index_type, index_expr);
1576   idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
1577 		     fold_convert (unsigned_index_type, minval));
1578   idx = force_gimple_operand_gsi (&gsi, idx,
1579 				  /*simple=*/true, NULL_TREE,
1580 				  /*before=*/true, GSI_SAME_STMT);
1581 
1582   if (m_handles_entire_switch)
1583     {
1584       /* if (idx > range) goto default */
1585       range
1586 	= force_gimple_operand_gsi (&gsi,
1587 				    fold_convert (unsigned_index_type, range),
1588 				    /*simple=*/true, NULL_TREE,
1589 				    /*before=*/true, GSI_SAME_STMT);
1590       tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1591       basic_block new_bb
1592 	= hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1593 					 profile_probability::unlikely ());
1594       gsi = gsi_last_bb (new_bb);
1595     }
1596 
1597   /* csui = (1 << (word_mode) idx) */
1598   csui = make_ssa_name (word_type_node);
1599   tmp = fold_build2 (LSHIFT_EXPR, word_type_node, word_mode_one,
1600 		     fold_convert (word_type_node, idx));
1601   tmp = force_gimple_operand_gsi (&gsi, tmp,
1602 				  /*simple=*/false, NULL_TREE,
1603 				  /*before=*/true, GSI_SAME_STMT);
1604   shift_stmt = gimple_build_assign (csui, tmp);
1605   gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1606   update_stmt (shift_stmt);
1607 
1608   profile_probability prob = profile_probability::always ();
1609 
1610   /* for each unique set of cases:
1611        if (const & csui) goto target  */
1612   for (k = 0; k < count; k++)
1613     {
1614       prob = profile_probability::always ().apply_scale (test[k].bits,
1615 							 bt_range);
1616       bt_range -= test[k].bits;
1617       tmp = wide_int_to_tree (word_type_node, test[k].mask);
1618       tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
1619       tmp = force_gimple_operand_gsi (&gsi, tmp,
1620 				      /*simple=*/true, NULL_TREE,
1621 				      /*before=*/true, GSI_SAME_STMT);
1622       tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
1623       basic_block new_bb
1624 	= hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
1625       gsi = gsi_last_bb (new_bb);
1626     }
1627 
1628   /* We should have removed all edges now.  */
1629   gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1630 
1631   /* If nothing matched, go to the default label.  */
1632   edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1633   e->probability = profile_probability::always ();
1634 }
1635 
1636 /* Split the basic block at the statement pointed to by GSIP, and insert
1637    a branch to basic block CASE_BB conditional on tree expression COND.
1638 
1639    A new edge from the to-be-split basic block to CASE_BB is created; it
1640    becomes the true edge of the new conditional jump and is given
1641    probability PROB.  The false edge falls through to the newly created
1642    block, which receives the statements from GSIP onwards.
1643 
1644    The CFG is updated.  The dominator tree will not be valid after
1645    this transformation.
1646 
1647    Returns the newly created basic block.  */
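/* Illustrative sketch only (statement and block names hypothetical):
   splitting

     split_bb:  s1; s2; s3;

   at GSIP pointing to s2, with condition COND and target CASE_BB, yields

     split_bb:  s1; if (COND) goto case_bb;    true edge, probability PROB
     new_bb:    s2; s3;                        false edge falls through here

   and new_bb is the returned block.  */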
1650 
1651 basic_block
1652 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1653 						 tree cond, basic_block case_bb,
1654 						 profile_probability prob)
1655 {
1656   tree tmp;
1657   gcond *cond_stmt;
1658   edge e_false;
1659   basic_block new_bb, split_bb = gsi_bb (*gsip);
1660 
1661   edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1662   e_true->probability = prob;
1663   gcc_assert (e_true->src == split_bb);
1664 
1665   tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1666 				  /*before=*/true, GSI_SAME_STMT);
1667   cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1668   gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1669 
1670   e_false = split_block (split_bb, cond_stmt);
1671   new_bb = e_false->dest;
1672   redirect_edge_pred (e_true, split_bb);
1673 
1674   e_false->flags &= ~EDGE_FALLTHRU;
1675   e_false->flags |= EDGE_FALSE_VALUE;
1676   e_false->probability = e_true->probability.invert ();
1677   new_bb->count = e_false->count ();
1678 
1679   return new_bb;
1680 }
1681 
1682 /* Compute the number of case labels that correspond to each outgoing edge of
1683    the switch statement.  Record this information in the edge's aux field.  */
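/* For instance (hypothetical), if "case 1:" and "case 2:" branch to the same
   basic block, the single outgoing edge to that block ends up with
   aux == (void *) 2.  */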
1684 
1685 void
1686 switch_decision_tree::compute_cases_per_edge ()
1687 {
1688   reset_out_edges_aux (m_switch);
1689   int ncases = gimple_switch_num_labels (m_switch);
1690   for (int i = ncases - 1; i >= 1; --i)
1691     {
1692       edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1693       case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1694     }
1695 }
1696 
1697 /* Analyze the switch statement and return true when the statement is expanded
1698    as a decision tree.  */
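/* Sketch of the clustering done below (case values hypothetical): the simple
   clusters built from the case labels are first merged into jump table
   clusters by find_jump_tables, and the remaining runs of SIMPLE_CASE
   clusters are then handed to find_bit_tests.  E.g. cases
   {1, 2, 3, 4, 10, 111, 112, 113} might end up as a jump table for 1-4,
   a plain case for 10 and a bit test for 111-113, depending on the cost
   parameters.  */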
1699 
1700 bool
1701 switch_decision_tree::analyze_switch_statement ()
1702 {
1703   unsigned l = gimple_switch_num_labels (m_switch);
1704   basic_block bb = gimple_bb (m_switch);
1705   auto_vec<cluster *> clusters;
1706   clusters.create (l - 1);
1707 
1708   basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1709   m_case_bbs.reserve (l);
1710   m_case_bbs.quick_push (default_bb);
1711 
1712   compute_cases_per_edge ();
1713 
1714   for (unsigned i = 1; i < l; i++)
1715     {
1716       tree elt = gimple_switch_label (m_switch, i);
1717       tree lab = CASE_LABEL (elt);
1718       basic_block case_bb = label_to_block (cfun, lab);
1719       edge case_edge = find_edge (bb, case_bb);
1720       tree low = CASE_LOW (elt);
1721       tree high = CASE_HIGH (elt);
1722 
1723       profile_probability p
1724 	= case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1725       clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1726 					       p));
1727       m_case_bbs.quick_push (case_edge->dest);
1728     }
1729 
1730   reset_out_edges_aux (m_switch);
1731 
1732   /* Find jump table clusters.  */
1733   vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1734 
1735   /* Find bit test clusters.  */
1736   vec<cluster *> output2;
1737   auto_vec<cluster *> tmp;
1738   output2.create (1);
1739   tmp.create (1);
1740 
1741   for (unsigned i = 0; i < output.length (); i++)
1742     {
1743       cluster *c = output[i];
1744       if (c->get_type () != SIMPLE_CASE)
1745 	{
1746 	  if (!tmp.is_empty ())
1747 	    {
1748 	      vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1749 	      output2.safe_splice (n);
1750 	      n.release ();
1751 	      tmp.truncate (0);
1752 	    }
1753 	  output2.safe_push (c);
1754 	}
1755       else
1756 	tmp.safe_push (c);
1757     }
1758 
1759   /* The temporary vector may still contain simple clusters to test.  */
1760   if (!tmp.is_empty ())
1761     {
1762       vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1763       output2.safe_splice (n);
1764       n.release ();
1765     }
1766 
1767   if (dump_file)
1768     {
1769       fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1770       for (unsigned i = 0; i < output2.length (); i++)
1771 	output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1772       fprintf (dump_file, "\n");
1773     }
1774 
1775   output.release ();
1776 
1777   bool expanded = try_switch_expansion (output2);
1778 
1779   for (unsigned i = 0; i < output2.length (); i++)
1780     delete output2[i];
1781 
1782   output2.release ();
1783 
1784   return expanded;
1785 }
1786 
1787 /* Attempt to expand CLUSTERS as a decision tree.  Return true when
1788    expanded.  */
1789 
1790 bool
1791 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1792 {
1793   tree index_expr = gimple_switch_index (m_switch);
1794   tree index_type = TREE_TYPE (index_expr);
1795   basic_block bb = gimple_bb (m_switch);
1796 
1797   if (gimple_switch_num_labels (m_switch) == 1
1798       || range_check_type (index_type) == NULL_TREE)
1799     return false;
1800 
1801   /* Find the default case target label.  */
1802   edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1803   m_default_bb = default_edge->dest;
1804 
1805   /* Insert the case clusters into m_case_list.  The sorted cluster vector
1806      is walked in reverse (descending) order and every new node is pushed
1807      at the head of the list, so the list we construct ends up sorted in
1808      ascending order.  */
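  /* E.g. (hypothetical) clusters {1, 5, 9} are visited as 9, 5, 1; each new
     node is pushed at the head, so m_case_list ends up as 1 -> 5 -> 9.  */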
1809 
1810   for (int i = clusters.length () - 1; i >= 0; i--)
1811     {
1812       case_tree_node *r = m_case_list;
1813       m_case_list = m_case_node_pool.allocate ();
1814       m_case_list->m_right = r;
1815       m_case_list->m_c = clusters[i];
1816     }
1817 
1818   record_phi_operand_mapping ();
1819 
1820   /* Split basic block that contains the gswitch statement.  */
1821   gimple_stmt_iterator gsi = gsi_last_bb (bb);
1822   edge e;
1823   if (gsi_end_p (gsi))
1824     e = split_block_after_labels (bb);
1825   else
1826     {
1827       gsi_prev (&gsi);
1828       e = split_block (bb, gsi_stmt (gsi));
1829     }
1830   bb = split_edge (e);
1831 
1832   /* Create new basic blocks for clusters that are not simple cases and
1833      therefore need cluster-specific expansion code of their own.  */
1834   for (unsigned i = 0; i < clusters.length (); i++)
1835     if (clusters[i]->get_type () != SIMPLE_CASE)
1836       {
1837 	clusters[i]->m_case_bb = create_empty_bb (bb);
1838 	clusters[i]->m_case_bb->loop_father = bb->loop_father;
1839       }
1840 
1841   /* Do not do extra work for a single cluster.  */
1842   if (clusters.length () == 1
1843       && clusters[0]->get_type () != SIMPLE_CASE)
1844     {
1845       cluster *c = clusters[0];
1846       c->emit (index_expr, index_type,
1847 	       gimple_switch_default_label (m_switch), m_default_bb);
1848       redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1849     }
1850   else
1851     {
1852       emit (bb, index_expr, default_edge->probability, index_type);
1853 
1854       /* Emit cluster-specific switch handling.  */
1855       for (unsigned i = 0; i < clusters.length (); i++)
1856 	if (clusters[i]->get_type () != SIMPLE_CASE)
1857 	  clusters[i]->emit (index_expr, index_type,
1858 			     gimple_switch_default_label (m_switch),
1859 			     m_default_bb);
1860     }
1861 
1862   fix_phi_operands_for_edges ();
1863 
1864   return true;
1865 }
1866 
1867 /* Before the switch transformation, record all SSA_NAMEs defined in the
1868    switch BB and used in a case label basic block.  */
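/* Illustrative example (SSA names hypothetical): for a PHI such as

     x_3 = PHI <x_1(switch_bb), x_2(other_bb)>

   in a case label block, the mapping x_3 -> x_1 is recorded so that edges
   added later by the decision tree can reuse x_1 as the incoming value
   (see fix_phi_operands_for_edges).  */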
1869 
1870 void
1871 switch_decision_tree::record_phi_operand_mapping ()
1872 {
1873   basic_block switch_bb = gimple_bb (m_switch);
1874   /* Record all PHI nodes that have to be fixed after conversion.  */
1875   for (unsigned i = 0; i < m_case_bbs.length (); i++)
1876     {
1877       gphi_iterator gsi;
1878       basic_block bb = m_case_bbs[i];
1879       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1880 	{
1881 	  gphi *phi = gsi.phi ();
1882 
1883 	  for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1884 	    {
1885 	      basic_block phi_src_bb = gimple_phi_arg_edge (phi, j)->src;
1886 	      if (phi_src_bb == switch_bb)
1887 		{
1888 		  tree def = gimple_phi_arg_def (phi, j);
1889 		  tree result = gimple_phi_result (phi);
1890 		  m_phi_mapping.put (result, def);
1891 		  break;
1892 		}
1893 	    }
1894 	}
1895     }
1896 }
1897 
1898 /* Append new operands to PHI statements that were introduced due to the
1899    addition of new edges to case label blocks.  */
1900 
1901 void
1902 switch_decision_tree::fix_phi_operands_for_edges ()
1903 {
1904   gphi_iterator gsi;
1905 
1906   for (unsigned i = 0; i < m_case_bbs.length (); i++)
1907     {
1908       basic_block bb = m_case_bbs[i];
1909       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1910 	{
1911 	  gphi *phi = gsi.phi ();
1912 	  for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1913 	    {
1914 	      tree def = gimple_phi_arg_def (phi, j);
1915 	      if (def == NULL_TREE)
1916 		{
1917 		  edge e = gimple_phi_arg_edge (phi, j);
1918 		  tree *definition
1919 		    = m_phi_mapping.get (gimple_phi_result (phi));
1920 		  gcc_assert (definition);
1921 		  add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1922 		}
1923 	    }
1924 	}
1925     }
1926 }
1927 
1928 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1929    one of the case labels recorded in m_case_list or to m_default_bb.
1930 
1931    We generate a binary decision tree to select the appropriate target
1932    code.  */
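/* Illustrative sketch only (case values and labels hypothetical): for cases
   {1, 2, 3} with equal probabilities the balanced tree roots at 2 and the
   emitted code has roughly the shape

	if (index == 2) goto L2;
	if (index == 3) goto L3;
	if (index == 1) goto L1;
	goto default;

   See emit_case_nodes for the general algorithm.  */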
1933 
1934 void
1935 switch_decision_tree::emit (basic_block bb, tree index_expr,
1936 			    profile_probability default_prob, tree index_type)
1937 {
1938   balance_case_nodes (&m_case_list, NULL);
1939 
1940   if (dump_file)
1941     dump_function_to_file (current_function_decl, dump_file, dump_flags);
1942   if (dump_file && (dump_flags & TDF_DETAILS))
1943     {
1944       int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1945       fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1946       gcc_assert (m_case_list != NULL);
1947       dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1948     }
1949 
1950   bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1951 			gimple_location (m_switch));
1952 
1953   if (bb)
1954     emit_jump (bb, m_default_bb);
1955 
1956   /* Remove the now-dead gswitch and delete the block that contained it.  */
1957   bb = gimple_bb (m_switch);
1958   gimple_stmt_iterator gsi = gsi_last_bb (bb);
1959   gsi_remove (&gsi, true);
1960 
1961   delete_basic_block (bb);
1962 }
1963 
1964 /* Take an ordered list of case nodes
1965    and transform them into a near-optimal binary tree,
1966    on the assumption that any target code selection value is as
1967    likely as any other.
1968 
1969    The transformation is performed by splitting the ordered
1970    list into two equal sections plus a pivot.  The parts are
1971    then attached to the pivot as left and right branches.  Each
1972    branch is then transformed recursively.  */
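/* For instance (hypothetical), an ordered list 1 -> 2 -> 3 -> 4 -> 5 with
   equal probabilities is roughly split at 3: 1 -> 2 becomes the left
   subtree, 4 -> 5 the right one, and both halves are then balanced
   recursively.  */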
1973 
1974 void
1975 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1976 					  case_tree_node *parent)
1977 {
1978   case_tree_node *np;
1979 
1980   np = *head;
1981   if (np)
1982     {
1983       int i = 0;
1984       int ranges = 0;
1985       case_tree_node **npp;
1986       case_tree_node *left;
1987       profile_probability prob = profile_probability::never ();
1988 
1989       /* Count the number of entries on this branch.  Also count the ranges.  */
1990 
1991       while (np)
1992 	{
1993 	  if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
1994 	    ranges++;
1995 
1996 	  i++;
1997 	  prob += np->m_c->m_prob;
1998 	  np = np->m_right;
1999 	}
2000 
2001       if (i > 2)
2002 	{
2003 	  /* Split this list if it is long enough for that to help.  */
2004 	  npp = head;
2005 	  left = *npp;
2006 	  profile_probability pivot_prob = prob.apply_scale (1, 2);
2007 
2008 	  /* Find the place in the list that bisects the list's total
2009 	     probability.  */
2010 	  while (1)
2011 	    {
2012 	      /* Skip nodes while their probability does not reach
2013 		 that amount.  */
2014 	      prob -= (*npp)->m_c->m_prob;
2015 	      if ((prob.initialized_p () && prob < pivot_prob)
2016 		  || ! (*npp)->m_right)
2017 		break;
2018 	      npp = &(*npp)->m_right;
2019 	    }
2020 
2021 	  np = *npp;
2022 	  *npp = NULL;
2023 	  *head = np;
2024 	  np->m_parent = parent;
2025 	  np->m_left = left == np ? NULL : left;
2026 
2027 	  /* Optimize each of the two split parts.  */
2028 	  balance_case_nodes (&np->m_left, np);
2029 	  balance_case_nodes (&np->m_right, np);
2030 	  np->m_c->m_subtree_prob = np->m_c->m_prob;
2031 	  if (np->m_left)
2032 	    np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2033 	  if (np->m_right)
2034 	    np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2035 	}
2036       else
2037 	{
2038 	  /* Else leave this branch as one level,
2039 	     but fill in `parent' fields.  */
2040 	  np = *head;
2041 	  np->m_parent = parent;
2042 	  np->m_c->m_subtree_prob = np->m_c->m_prob;
2043 	  for (; np->m_right; np = np->m_right)
2044 	    {
2045 	      np->m_right->m_parent = np;
2046 	      (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2047 	    }
2048 	}
2049     }
2050 }
2051 
2052 /* Dump ROOT, a list or tree of case nodes, to file.  */
2053 
2054 void
2055 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2056 				       int indent_step, int indent_level)
2057 {
2058   if (root == 0)
2059     return;
2060   indent_level++;
2061 
2062   dump_case_nodes (f, root->m_left, indent_step, indent_level);
2063 
2064   fputs (";; ", f);
2065   fprintf (f, "%*s", indent_step * indent_level, "");
2066   root->m_c->dump (f);
2067   root->m_c->m_prob.dump (f);
2068   fputs (" (subtree: ", f);
2069   root->m_c->m_subtree_prob.dump (f);
2070   fputs (")\n", f);
2071 
2072   dump_case_nodes (f, root->m_right, indent_step, indent_level);
2073 }
2074 
2075 
2076 /* Make basic block BB jump unconditionally to CASE_BB.  */
2077 
2078 void
2079 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2080 {
2081   edge e = single_succ_edge (bb);
2082   redirect_edge_succ (e, case_bb);
2083 }
2084 
2085 /* Generate code in basic block BB to compare OP0 with OP1 using COMPARISON
2086    (a GIMPLE comparison code: EQ_EXPR, NE_EXPR, GT_EXPR, etc.) and jump to
2087    LABEL_BB if the comparison is true.  PROB is the probability of jumping
2088    to LABEL_BB.  Returns the block reached when the comparison is false.  */
2089 
2090 basic_block
2091 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2092 					       tree op1, tree_code comparison,
2093 					       basic_block label_bb,
2094 					       profile_probability prob,
2095 					       location_t loc)
2096 {
2097   /* TODO: this is called once with OP0 not equal to the switch index.  */
2098   op1 = fold_convert (TREE_TYPE (op0), op1);
2099 
2100   gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2101   gimple_set_location (cond, loc);
2102   gimple_stmt_iterator gsi = gsi_last_bb (bb);
2103   gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2104 
2105   gcc_assert (single_succ_p (bb));
2106 
2107   /* Make a new basic block where false branch will take place.  */
2108   edge false_edge = split_block (bb, cond);
2109   false_edge->flags = EDGE_FALSE_VALUE;
2110   false_edge->probability = prob.invert ();
2111 
2112   edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2113   true_edge->probability = prob;
2114 
2115   return false_edge->dest;
2116 }
2117 
2118 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2119    PROB is the probability of jumping to LABEL_BB.
2120    BB is a basic block where the new condition will be placed.  */
2121 
2122 basic_block
2123 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2124 					basic_block label_bb,
2125 					profile_probability prob,
2126 					location_t loc)
2127 {
2128   op1 = fold_convert (TREE_TYPE (op0), op1);
2129 
2130   gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2131   gimple_set_location (cond, loc);
2132   gimple_stmt_iterator gsi = gsi_last_bb (bb);
2133   gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2134 
2135   gcc_assert (single_succ_p (bb));
2136 
2137   /* Make a new basic block where false branch will take place.  */
2138   edge false_edge = split_block (bb, cond);
2139   false_edge->flags = EDGE_FALSE_VALUE;
2140   false_edge->probability = prob.invert ();
2141 
2142   edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2143   true_edge->probability = prob;
2144 
2145   return false_edge->dest;
2146 }
2147 
2148 /* Emit step-by-step code to select a case for the value of INDEX.
2149    The generated decision tree follows the form of the case-node binary
2150    tree NODE, whose nodes represent test conditions.
2151    DEFAULT_PROB is the probability of reaching the default BB.
2152    INDEX_TYPE is the type of the switch index, LOC the location to use.  */
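/* A note on the probabilities used below (descriptive only): at each test
   the branch probability is the probability mass of the cases reached
   through that branch divided by the mass still in play, i.e.
   m_subtree_prob plus the remaining DEFAULT_PROB, and DEFAULT_PROB is
   halved at every ordered comparison that splits the remaining range.  */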
2153 
2154 basic_block
2155 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2156 				       case_tree_node *node,
2157 				       profile_probability default_prob,
2158 				       tree index_type, location_t loc)
2159 {
2160   profile_probability p;
2161 
2162   /* If node is null, we are done.  */
2163   if (node == NULL)
2164     return bb;
2165 
2166   /* Single value case.  */
2167   if (node->m_c->is_single_value_p ())
2168     {
2169       /* Node is single valued.  First see if the index expression matches
2170 	 this node and then check our children, if any.  */
2171       p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2172       bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2173 			     node->m_c->m_case_bb, p, loc);
2174       /* Since this case is taken at this point, subtract its probability
2175 	 from m_subtree_prob.  */
2176       node->m_c->m_subtree_prob -= p;
2177 
2178       if (node->m_left != NULL && node->m_right != NULL)
2179 	{
2180 	  /* 1) the node has both children
2181 
2182 	     If both children are single-valued cases with no
2183 	     children, finish up all the work.  This way, we can save
2184 	     one ordered comparison.  */
2185 
2186 	  if (!node->m_left->has_child ()
2187 	      && node->m_left->m_c->is_single_value_p ()
2188 	      && !node->m_right->has_child ()
2189 	      && node->m_right->m_c->is_single_value_p ())
2190 	    {
2191 	      p = (node->m_right->m_c->m_prob
2192 		   / (node->m_c->m_subtree_prob + default_prob));
2193 	      bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2194 				     node->m_right->m_c->m_case_bb, p, loc);
2195 
2196 	      p = (node->m_left->m_c->m_prob
2197 		   / (node->m_c->m_subtree_prob + default_prob));
2198 	      bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2199 				     node->m_left->m_c->m_case_bb, p, loc);
2200 	    }
2201 	  else
2202 	    {
2203 	      /* Branch to a label where we will handle it later.  */
2204 	      basic_block test_bb = split_edge (single_succ_edge (bb));
2205 	      redirect_edge_succ (single_pred_edge (test_bb),
2206 				  single_succ_edge (bb)->dest);
2207 
2208 	      p = ((node->m_right->m_c->m_subtree_prob
2209 		    + default_prob.apply_scale (1, 2))
2210 		   / (node->m_c->m_subtree_prob + default_prob));
2211 	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2212 					    GT_EXPR, test_bb, p, loc);
2213 	      default_prob = default_prob.apply_scale (1, 2);
2214 
2215 	      /* Handle the left-hand subtree.  */
2216 	      bb = emit_case_nodes (bb, index, node->m_left,
2217 				    default_prob, index_type, loc);
2218 
2219 	      /* If the left-hand subtree fell through,
2220 		 don't let it fall into the right-hand subtree.  */
2221 	      if (bb && m_default_bb)
2222 		emit_jump (bb, m_default_bb);
2223 
2224 	      bb = emit_case_nodes (test_bb, index, node->m_right,
2225 				    default_prob, index_type, loc);
2226 	    }
2227 	}
2228       else if (node->m_left == NULL && node->m_right != NULL)
2229 	{
2230 	  /* 2) the node has only right child.  */
2231 
2232 	  /* Here we have a right child but no left so we issue a conditional
2233 	     branch to default and process the right child.
2234 
2235 	     Omit the conditional branch to default if the right child
2236 	     does not have any children and is single valued; it would
2237 	     cost too much space to save so little time.  */
2238 
2239 	  if (node->m_right->has_child ()
2240 	      || !node->m_right->m_c->is_single_value_p ())
2241 	    {
2242 	      p = (default_prob.apply_scale (1, 2)
2243 		   / (node->m_c->m_subtree_prob + default_prob));
2244 	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2245 					    LT_EXPR, m_default_bb, p, loc);
2246 	      default_prob = default_prob.apply_scale (1, 2);
2247 
2248 	      bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2249 				    index_type, loc);
2250 	    }
2251 	  else
2252 	    {
2253 	      /* We cannot process node->right normally
2254 		 since we haven't ruled out the numbers less than
2255 		 this node's value.  So handle node->right explicitly.  */
2256 	      p = (node->m_right->m_c->m_subtree_prob
2257 		   / (node->m_c->m_subtree_prob + default_prob));
2258 	      bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2259 				     node->m_right->m_c->m_case_bb, p, loc);
2260 	    }
2261 	}
2262       else if (node->m_left != NULL && node->m_right == NULL)
2263 	{
2264 	  /* 3) just one subtree, on the left.  Similar case as previous.  */
2265 
2266 	  if (node->m_left->has_child ()
2267 	      || !node->m_left->m_c->is_single_value_p ())
2268 	    {
2269 	      p = (default_prob.apply_scale (1, 2)
2270 		   / (node->m_c->m_subtree_prob + default_prob));
2271 	      bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2272 					    GT_EXPR, m_default_bb, p, loc);
2273 	      default_prob = default_prob.apply_scale (1, 2);
2274 
2275 	      bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2276 				    index_type, loc);
2277 	    }
2278 	  else
2279 	    {
2280 	      /* We cannot process node->left normally
2281 		 since we haven't ruled out the numbers less than
2282 		 this node's value.  So handle node->left explicitly.  */
2283 	      p = (node->m_left->m_c->m_subtree_prob
2284 		   / (node->m_c->m_subtree_prob + default_prob));
2285 	      bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2286 				     node->m_left->m_c->m_case_bb, p, loc);
2287 	    }
2288 	}
2289     }
2290   else
2291     {
2292       /* Node is a range.  These cases are very similar to those for a single
2293 	 value, except that we do not start by testing whether this node
2294 	 is the one to branch to.  */
2295       if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2296 	{
2297 	  /* Branch to a label where we will handle it later.  */
2298 	  basic_block test_bb = split_edge (single_succ_edge (bb));
2299 	  redirect_edge_succ (single_pred_edge (test_bb),
2300 			      single_succ_edge (bb)->dest);
2301 
2303 	  profile_probability right_prob = profile_probability::never ();
2304 	  if (node->m_right)
2305 	    right_prob = node->m_right->m_c->m_subtree_prob;
2306 	  p = ((right_prob + default_prob.apply_scale (1, 2))
2307 	       / (node->m_c->m_subtree_prob + default_prob));
2308 
2309 	  bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2310 					GT_EXPR, test_bb, p, loc);
2311 	  default_prob = default_prob.apply_scale (1, 2);
2312 
2313 	  /* Value belongs to this node or to the left-hand subtree.  */
2314 	  p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2315 	  bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2316 					GE_EXPR, node->m_c->m_case_bb, p, loc);
2317 
2318 	  /* Handle the left-hand subtree.  */
2319 	  bb = emit_case_nodes (bb, index, node->m_left,
2320 				default_prob, index_type, loc);
2321 
2322 	  /* If the left-hand subtree fell through,
2323 	     don't let it fall into the right-hand subtree.  */
2324 	  if (bb && m_default_bb)
2325 	    emit_jump (bb, m_default_bb);
2326 
2327 	  bb = emit_case_nodes (test_bb, index, node->m_right,
2328 				default_prob, index_type, loc);
2329 	}
2330       else
2331 	{
2332 	  /* Node has no children, so check the low and high bounds with a
2333 	     single range test (see generate_range_test) and branch to the
2334 	     default BB when INDEX falls outside the node's range.  */
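	  /* Illustrative sketch only (bounds hypothetical): for a childless
	     range node covering 10 ... 20, generate_range_test folds the two
	     bound checks into one unsigned comparison, roughly

		if ((unsigned) index - 10 > 10) goto default;
		goto case_bb;  */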
2335 	  tree lhs, rhs;
2336 	  generate_range_test (bb, index, node->m_c->get_low (),
2337 			       node->m_c->get_high (), &lhs, &rhs);
2338 	  p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2339 
2340 	  bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2341 					m_default_bb, p, loc);
2342 
2343 	  emit_jump (bb, node->m_c->m_case_bb);
2344 	  return NULL;
2345 	}
2346     }
2347 
2348   return bb;
2349 }
2350 
2351 /* The main function of the pass scans statements for switches and invokes
2352    switch_conversion::expand on them.  */
2353 
2354 namespace {
2355 
2356 const pass_data pass_data_convert_switch =
2357 {
2358   GIMPLE_PASS, /* type */
2359   "switchconv", /* name */
2360   OPTGROUP_NONE, /* optinfo_flags */
2361   TV_TREE_SWITCH_CONVERSION, /* tv_id */
2362   ( PROP_cfg | PROP_ssa ), /* properties_required */
2363   0, /* properties_provided */
2364   0, /* properties_destroyed */
2365   0, /* todo_flags_start */
2366   TODO_update_ssa, /* todo_flags_finish */
2367 };
2368 
2369 class pass_convert_switch : public gimple_opt_pass
2370 {
2371 public:
2372   pass_convert_switch (gcc::context *ctxt)
2373     : gimple_opt_pass (pass_data_convert_switch, ctxt)
2374   {}
2375 
2376   /* opt_pass methods: */
2377   virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2378   virtual unsigned int execute (function *);
2379 
2380 }; // class pass_convert_switch
2381 
2382 unsigned int
2383 pass_convert_switch::execute (function *fun)
2384 {
2385   basic_block bb;
2386   bool cfg_altered = false;
2387 
2388   FOR_EACH_BB_FN (bb, fun)
2389   {
2390     gimple *stmt = last_stmt (bb);
2391     if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2392       {
2393 	if (dump_file)
2394 	  {
2395 	    expanded_location loc = expand_location (gimple_location (stmt));
2396 
2397 	    fprintf (dump_file, "beginning to process the following "
2398 		     "SWITCH statement (%s:%d) : ------- \n",
2399 		     loc.file, loc.line);
2400 	    print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2401 	    putc ('\n', dump_file);
2402 	  }
2403 
2404 	switch_conversion sconv;
2405 	sconv.expand (as_a <gswitch *> (stmt));
2406 	cfg_altered |= sconv.m_cfg_altered;
2407 	if (!sconv.m_reason)
2408 	  {
2409 	    if (dump_file)
2410 	      {
2411 		fputs ("Switch converted\n", dump_file);
2412 		fputs ("--------------------------------\n", dump_file);
2413 	      }
2414 
2415 	    /* Make no effort to update the post-dominator tree.
2416 	       It is actually not that hard for the transformations
2417 	       we have performed, but it is not supported
2418 	       by iterate_fix_dominators.  */
2419 	    free_dominance_info (CDI_POST_DOMINATORS);
2420 	  }
2421 	else
2422 	  {
2423 	    if (dump_file)
2424 	      {
2425 		fputs ("Bailing out - ", dump_file);
2426 		fputs (sconv.m_reason, dump_file);
2427 		fputs ("\n--------------------------------\n", dump_file);
2428 	      }
2429 	  }
2430       }
2431   }
2432 
2433   return cfg_altered ? TODO_cleanup_cfg : 0;
2434 }
2435 
2436 } // anon namespace
2437 
2438 gimple_opt_pass *
2439 make_pass_convert_switch (gcc::context *ctxt)
2440 {
2441   return new pass_convert_switch (ctxt);
2442 }
2443 
2444 /* The main function of the lowering pass scans statements for switches and
2445    expands them into decision trees, jump tables or bit tests.  */
2446 
2447 namespace {
2448 
2449 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2450 {
2451 public:
2452   pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2453 
2454   static const pass_data data;
2455   opt_pass *
2456   clone ()
2457   {
2458     return new pass_lower_switch<O0> (m_ctxt);
2459   }
2460 
2461   virtual bool
2462   gate (function *)
2463   {
2464     return !O0 || !optimize;
2465   }
2466 
2467   virtual unsigned int execute (function *fun);
2468 }; // class pass_lower_switch
2469 
2470 template <bool O0>
2471 const pass_data pass_lower_switch<O0>::data = {
2472   GIMPLE_PASS,		       /* type */
2473   O0 ? "switchlower_O0" : "switchlower", /* name */
2474   OPTGROUP_NONE, /* optinfo_flags */
2475   TV_TREE_SWITCH_LOWERING, /* tv_id */
2476   ( PROP_cfg | PROP_ssa ), /* properties_required */
2477   0, /* properties_provided */
2478   0, /* properties_destroyed */
2479   0, /* todo_flags_start */
2480   TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2481 };
2482 
2483 template <bool O0>
2484 unsigned int
2485 pass_lower_switch<O0>::execute (function *fun)
2486 {
2487   basic_block bb;
2488   bool expanded = false;
2489 
2490   auto_vec<gimple *> switch_statements;
2491   switch_statements.create (1);
2492 
2493   FOR_EACH_BB_FN (bb, fun)
2494     {
2495       gimple *stmt = last_stmt (bb);
2496       gswitch *swtch;
2497       if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2498 	{
2499 	  if (!O0)
2500 	    group_case_labels_stmt (swtch);
2501 	  switch_statements.safe_push (swtch);
2502 	}
2503     }
2504 
2505   for (unsigned i = 0; i < switch_statements.length (); i++)
2506     {
2507       gimple *stmt = switch_statements[i];
2508       if (dump_file)
2509 	{
2510 	  expanded_location loc = expand_location (gimple_location (stmt));
2511 
2512 	  fprintf (dump_file, "beginning to process the following "
2513 		   "SWITCH statement (%s:%d) : ------- \n",
2514 		   loc.file, loc.line);
2515 	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2516 	  putc ('\n', dump_file);
2517 	}
2518 
2519       gswitch *swtch = dyn_cast<gswitch *> (stmt);
2520       if (swtch)
2521 	{
2522 	  switch_decision_tree dt (swtch);
2523 	  expanded |= dt.analyze_switch_statement ();
2524 	}
2525     }
2526 
2527   if (expanded)
2528     {
2529       free_dominance_info (CDI_DOMINATORS);
2530       free_dominance_info (CDI_POST_DOMINATORS);
2531       mark_virtual_operands_for_renaming (cfun);
2532     }
2533 
2534   return 0;
2535 }
2536 
2537 } // anon namespace
2538 
2539 gimple_opt_pass *
2540 make_pass_lower_switch_O0 (gcc::context *ctxt)
2541 {
2542   return new pass_lower_switch<true> (ctxt);
2543 }
2544 gimple_opt_pass *
2545 make_pass_lower_switch (gcc::context *ctxt)
2546 {
2547   return new pass_lower_switch<false> (ctxt);
2548 }
2549 
2550 
2551