1 /* Branch prediction routines for the GNU compiler.
2    Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* References:
21 
22    [1] "Branch Prediction for Free"
23        Ball and Larus; PLDI '93.
24    [2] "Static Branch Frequency and Program Profile Analysis"
25        Wu and Larus; MICRO-27.
26    [3] "Corpus-based Static Branch Prediction"
27        Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95.  */
28 
29 
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "backend.h"
34 #include "rtl.h"
35 #include "tree.h"
36 #include "gimple.h"
37 #include "cfghooks.h"
38 #include "tree-pass.h"
39 #include "ssa.h"
40 #include "memmodel.h"
41 #include "emit-rtl.h"
42 #include "cgraph.h"
43 #include "coverage.h"
44 #include "diagnostic-core.h"
45 #include "gimple-predict.h"
46 #include "fold-const.h"
47 #include "calls.h"
48 #include "cfganal.h"
49 #include "profile.h"
50 #include "sreal.h"
51 #include "params.h"
52 #include "cfgloop.h"
53 #include "gimple-iterator.h"
54 #include "tree-cfg.h"
55 #include "tree-ssa-loop-niter.h"
56 #include "tree-ssa-loop.h"
57 #include "tree-scalar-evolution.h"
58 #include "ipa-utils.h"
59 #include "gimple-pretty-print.h"
60 #include "selftest.h"
61 #include "cfgrtl.h"
62 #include "stringpool.h"
63 #include "attribs.h"
64 
65 /* Enum with reasons why a predictor is ignored.  */
66 
67 enum predictor_reason
68 {
69   REASON_NONE,
70   REASON_IGNORED,
71   REASON_SINGLE_EDGE_DUPLICATE,
72   REASON_EDGE_PAIR_DUPLICATE
73 };
74 
75 /* String messages for the aforementioned enum.  */
76 
77 static const char *reason_messages[] = {"", " (ignored)",
78     " (single edge duplicate)", " (edge pair duplicate)"};
79 
80 /* real constants: 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
81 		   1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX.  */
82 static sreal real_almost_one, real_br_prob_base,
83 	     real_inv_br_prob_base, real_one_half, real_bb_freq_max;
84 
85 static void combine_predictions_for_insn (rtx_insn *, basic_block);
86 static void dump_prediction (FILE *, enum br_predictor, int, basic_block,
87 			     enum predictor_reason, edge);
88 static void predict_paths_leading_to (basic_block, enum br_predictor,
89 				      enum prediction,
90 				      struct loop *in_loop = NULL);
91 static void predict_paths_leading_to_edge (edge, enum br_predictor,
92 					   enum prediction,
93 					   struct loop *in_loop = NULL);
94 static bool can_predict_insn_p (const rtx_insn *);
95 static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
96 static void determine_unlikely_bbs ();
97 
98 /* Information we hold about each branch predictor.
99    Filled using information from predict.def.  */
100 
101 struct predictor_info
102 {
103   const char *const name;	/* Name used in the debugging dumps.  */
104   const int hitrate;		/* Expected hitrate used by
105 				   predict_insn_def call.  */
106   const int flags;
107 };
108 
109 /* Use the given predictor without Dempster-Shafer theory if it matches
110    using the first_match heuristics.  */
111 #define PRED_FLAG_FIRST_MATCH 1
112 
113 /* Convert a hitrate given in percent to our internal representation.  */
114 
115 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
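/* As an illustration (assuming the usual REG_BR_PROB_BASE of 10000):
   HITRATE (90) == (90 * 10000 + 50) / 100 == 9000, i.e. a predictor
   with a 90% expected hitrate is stored as 9000.  */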
116 
117 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
118 static const struct predictor_info predictor_info[]= {
119 #include "predict.def"
120 
121   /* Upper bound on predictors.  */
122   {NULL, 0, 0}
123 };
124 #undef DEF_PREDICTOR
125 
126 static gcov_type min_count = -1;
127 
128 /* Determine the threshold for hot BB counts.  */
129 
130 gcov_type
131 get_hot_bb_threshold ()
132 {
133   if (min_count == -1)
134     {
135       min_count
136 	= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION);
137       if (dump_file)
138 	fprintf (dump_file, "Setting hotness threshold to %" PRId64 ".\n",
139 		 min_count);
140     }
141   return min_count;
142 }
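/* A worked example, assuming the default --param hot-bb-count-fraction of
   10000: with profile_info->sum_max == 2000000 the hotness threshold becomes
   2000000 / 10000 == 200, so only blocks executed at least 200 times are
   considered hot.  */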
143 
144 /* Set the threshold for hot BB counts.  */
145 
146 void
147 set_hot_bb_threshold (gcov_type min)
148 {
149   min_count = min;
150 }
151 
152 /* Return TRUE if profile count COUNT is considered to be hot in function FUN.  */
153 
154 bool
155 maybe_hot_count_p (struct function *fun, profile_count count)
156 {
157   if (!count.initialized_p ())
158     return true;
159   if (count.ipa () == profile_count::zero ())
160     return false;
161   if (!count.ipa_p ())
162     {
163       struct cgraph_node *node = cgraph_node::get (fun->decl);
164       if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
165 	{
166 	  if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
167 	    return false;
168 	  if (node->frequency == NODE_FREQUENCY_HOT)
169 	    return true;
170 	}
171       if (profile_status_for_fn (fun) == PROFILE_ABSENT)
172 	return true;
173       if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
174 	  && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
175 	return false;
176       if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
177 	return false;
178       if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
179 	  < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
180 	return false;
181       return true;
182     }
183   /* Code executed at most once is not hot.  */
184   if (count <= MAX (profile_info ? profile_info->runs : 1, 1))
185     return false;
186   return (count.to_gcov_type () >= get_hot_bb_threshold ());
187 }
188 
189 /* Return true in case BB can be CPU intensive and should be optimized
190    for maximal performance.  */
191 
192 bool
193 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
194 {
195   gcc_checking_assert (fun);
196   return maybe_hot_count_p (fun, bb->count);
197 }
198 
199 /* Return true in case BB can be CPU intensive and should be optimized
200    for maximal performance.  */
201 
202 bool
203 maybe_hot_edge_p (edge e)
204 {
205   return maybe_hot_count_p (cfun, e->count ());
206 }
207 
208 /* Return true if profile COUNT, or the static node frequency of function
209    FUN, reflects never being executed.  */
210 
211 static bool
212 probably_never_executed (struct function *fun,
213                          profile_count count)
214 {
215   gcc_checking_assert (fun);
216   if (count.ipa () == profile_count::zero ())
217     return true;
218   /* Do not trust adjusted counts.  Trusting them would make us drop code
219      with a low execution count into the cold section as a result of
220      inlining.  These low counts are not safe even with a read profile and
221      may lead us to place code which actually gets executed into the cold
222      section of the binary, which is not desirable.  */
223   if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
224     {
225       int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
226       if (count.apply_scale (unlikely_count_fraction, 1) >= profile_info->runs)
227 	return false;
228       return true;
229     }
230   if ((!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
231       && (cgraph_node::get (fun->decl)->frequency
232 	  == NODE_FREQUENCY_UNLIKELY_EXECUTED))
233     return true;
234   return false;
235 }
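/* A worked example for the precise-count path above, assuming the default
   --param unlikely-bb-count-fraction of 20: with profile_info->runs == 1000,
   a count of 49 gives 49 * 20 == 980 < 1000, so the block is considered
   never executed; a count of 50 gives 1000 >= 1000 and the block is kept.  */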
236 
237 
238 /* Return true in case BB is probably never executed.  */
239 
240 bool
241 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
242 {
243   return probably_never_executed (fun, bb->count);
244 }
245 
246 
247 /* Return true if E is unlikely executed for obvious reasons.  */
248 
249 static bool
250 unlikely_executed_edge_p (edge e)
251 {
252   return (e->count () == profile_count::zero ()
253 	  || e->probability == profile_probability::never ())
254 	 || (e->flags & (EDGE_EH | EDGE_FAKE));
255 }
256 
257 /* Return true in case edge E is probably never executed.  */
258 
259 bool
260 probably_never_executed_edge_p (struct function *fun, edge e)
261 {
262   if (unlikely_executed_edge_p (e))
263     return true;
264   return probably_never_executed (fun, e->count ());
265 }
266 
267 /* Return true when current function should always be optimized for size.  */
268 
269 bool
270 optimize_function_for_size_p (struct function *fun)
271 {
272   if (!fun || !fun->decl)
273     return optimize_size;
274   cgraph_node *n = cgraph_node::get (fun->decl);
275   return n && n->optimize_for_size_p ();
276 }
277 
278 /* Return true when current function should always be optimized for speed.  */
279 
280 bool
281 optimize_function_for_speed_p (struct function *fun)
282 {
283   return !optimize_function_for_size_p (fun);
284 }
285 
286 /* Return the optimization type that should be used for the function FUN.  */
287 
288 optimization_type
289 function_optimization_type (struct function *fun)
290 {
291   return (optimize_function_for_speed_p (fun)
292 	  ? OPTIMIZE_FOR_SPEED
293 	  : OPTIMIZE_FOR_SIZE);
294 }
295 
296 /* Return TRUE when BB should be optimized for size.  */
297 
298 bool
299 optimize_bb_for_size_p (const_basic_block bb)
300 {
301   return (optimize_function_for_size_p (cfun)
302 	  || (bb && !maybe_hot_bb_p (cfun, bb)));
303 }
304 
305 /* Return TRUE when BB should be optimized for speed.  */
306 
307 bool
308 optimize_bb_for_speed_p (const_basic_block bb)
309 {
310   return !optimize_bb_for_size_p (bb);
311 }
312 
313 /* Return the optimization type that should be used for block BB.  */
314 
315 optimization_type
316 bb_optimization_type (const_basic_block bb)
317 {
318   return (optimize_bb_for_speed_p (bb)
319 	  ? OPTIMIZE_FOR_SPEED
320 	  : OPTIMIZE_FOR_SIZE);
321 }
322 
323 /* Return TRUE when edge E should be optimized for size.  */
324 
325 bool
326 optimize_edge_for_size_p (edge e)
327 {
328   return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
329 }
330 
331 /* Return TRUE when edge E should be optimized for speed.  */
332 
333 bool
334 optimize_edge_for_speed_p (edge e)
335 {
336   return !optimize_edge_for_size_p (e);
337 }
338 
339 /* Return TRUE when the current instruction should be optimized for size.  */
340 
341 bool
342 optimize_insn_for_size_p (void)
343 {
344   return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
345 }
346 
347 /* Return TRUE when the current instruction should be optimized for speed.  */
348 
349 bool
350 optimize_insn_for_speed_p (void)
351 {
352   return !optimize_insn_for_size_p ();
353 }
354 
355 /* Return TRUE when LOOP should be optimized for size.  */
356 
357 bool
358 optimize_loop_for_size_p (struct loop *loop)
359 {
360   return optimize_bb_for_size_p (loop->header);
361 }
362 
363 /* Return TRUE when LOOP should be optimized for speed.  */
364 
365 bool
366 optimize_loop_for_speed_p (struct loop *loop)
367 {
368   return optimize_bb_for_speed_p (loop->header);
369 }
370 
371 /* Return TRUE when LOOP nest should be optimized for speed.  */
372 
373 bool
374 optimize_loop_nest_for_speed_p (struct loop *loop)
375 {
376   struct loop *l = loop;
377   if (optimize_loop_for_speed_p (loop))
378     return true;
379   l = loop->inner;
380   while (l && l != loop)
381     {
382       if (optimize_loop_for_speed_p (l))
383         return true;
384       if (l->inner)
385         l = l->inner;
386       else if (l->next)
387         l = l->next;
388       else
389         {
390 	  while (l != loop && !l->next)
391 	    l = loop_outer (l);
392 	  if (l != loop)
393 	    l = l->next;
394 	}
395     }
396   return false;
397 }
398 
399 /* Return TRUE when LOOP nest should be optimized for size.  */
400 
401 bool
402 optimize_loop_nest_for_size_p (struct loop *loop)
403 {
404   return !optimize_loop_nest_for_speed_p (loop);
405 }
406 
407 /* Return true when edge E is likely to be well predictable by branch
408    predictor.  */
409 
410 bool
411 predictable_edge_p (edge e)
412 {
413   if (!e->probability.initialized_p ())
414     return false;
415   if ((e->probability.to_reg_br_prob_base ()
416        <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
417       || (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
418           <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
419     return true;
420   return false;
421 }
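/* For instance, with the default --param predictable-branch-outcome of 2,
   an edge is considered well predictable when its probability is at most
   2% or at least 98%; anything in between is treated as unpredictable.  */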
422 
423 
424 /* Set RTL expansion for BB profile.  */
425 
426 void
427 rtl_profile_for_bb (basic_block bb)
428 {
429   crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
430 }
431 
432 /* Set RTL expansion for edge profile.  */
433 
434 void
435 rtl_profile_for_edge (edge e)
436 {
437   crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
438 }
439 
440 /* Set RTL expansion to default mode (i.e. when profile info is not known).  */
441 void
442 default_rtl_profile (void)
443 {
444   crtl->maybe_hot_insn_p = true;
445 }
446 
447 /* Return true if one of the outgoing edges of BB is already predicted by
448    PREDICTOR.  */
449 
450 bool
451 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
452 {
453   rtx note;
454   if (!INSN_P (BB_END (bb)))
455     return false;
456   for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
457     if (REG_NOTE_KIND (note) == REG_BR_PRED
458 	&& INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
459       return true;
460   return false;
461 }
462 
463 /* Structure representing predictions at the tree level.  */
464 
465 struct edge_prediction {
466     struct edge_prediction *ep_next;
467     edge ep_edge;
468     enum br_predictor ep_predictor;
469     int ep_probability;
470 };
471 
472 /* This map contains for a basic block the list of predictions for the
473    outgoing edges.  */
474 
475 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
476 
477 /* Return true if one of the outgoing edges of BB is already predicted by
478    PREDICTOR.  */
479 
480 bool
481 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
482 {
483   struct edge_prediction *i;
484   edge_prediction **preds = bb_predictions->get (bb);
485 
486   if (!preds)
487     return false;
488 
489   for (i = *preds; i; i = i->ep_next)
490     if (i->ep_predictor == predictor)
491       return true;
492   return false;
493 }
494 
495 /* Return true if edge E is already predicted by PREDICTOR with the
496    outcome TAKEN.  */
497 
498 bool
499 edge_predicted_by_p (edge e, enum br_predictor predictor, bool taken)
500 {
501   struct edge_prediction *i;
502   basic_block bb = e->src;
503   edge_prediction **preds = bb_predictions->get (bb);
504   if (!preds)
505     return false;
506 
507   int probability = predictor_info[(int) predictor].hitrate;
508 
509   if (taken != TAKEN)
510     probability = REG_BR_PROB_BASE - probability;
511 
512   for (i = *preds; i; i = i->ep_next)
513     if (i->ep_predictor == predictor
514 	&& i->ep_edge == e
515 	&& i->ep_probability == probability)
516       return true;
517   return false;
518 }
519 
520 /* Same predicate as above, working on edges.  */
521 bool
522 edge_probability_reliable_p (const_edge e)
523 {
524   return e->probability.probably_reliable_p ();
525 }
526 
527 /* Same predicate as edge_probability_reliable_p, working on notes.  */
528 bool
529 br_prob_note_reliable_p (const_rtx note)
530 {
531   gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
532   return profile_probability::from_reg_br_prob_note
533 		 (XINT (note, 0)).probably_reliable_p ();
534 }
535 
536 static void
537 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
538 {
539   gcc_assert (any_condjump_p (insn));
540   if (!flag_guess_branch_prob)
541     return;
542 
543   add_reg_note (insn, REG_BR_PRED,
544 		gen_rtx_CONCAT (VOIDmode,
545 				GEN_INT ((int) predictor),
546 				GEN_INT ((int) probability)));
547 }
548 
549 /* Predict insn by given predictor.  */
550 
551 void
552 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
553 		  enum prediction taken)
554 {
555    int probability = predictor_info[(int) predictor].hitrate;
556    gcc_assert (probability != PROB_UNINITIALIZED);
557 
558    if (taken != TAKEN)
559      probability = REG_BR_PROB_BASE - probability;
560 
561    predict_insn (insn, predictor, probability);
562 }
563 
564 /* Predict edge E with given probability if possible.  */
565 
566 void
567 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
568 {
569   rtx_insn *last_insn;
570   last_insn = BB_END (e->src);
571 
572   /* We can store the branch prediction information only for
573      conditional jumps.  */
574   if (!any_condjump_p (last_insn))
575     return;
576 
577   /* We always store probability of branching.  */
578   if (e->flags & EDGE_FALLTHRU)
579     probability = REG_BR_PROB_BASE - probability;
580 
581   predict_insn (last_insn, predictor, probability);
582 }
583 
584 /* Predict edge E with the given PROBABILITY.  */
585 void
586 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
587 {
588   if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
589       && EDGE_COUNT (e->src->succs) > 1
590       && flag_guess_branch_prob
591       && optimize)
592     {
593       struct edge_prediction *i = XNEW (struct edge_prediction);
594       edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
595 
596       i->ep_next = preds;
597       preds = i;
598       i->ep_probability = probability;
599       i->ep_predictor = predictor;
600       i->ep_edge = e;
601     }
602 }
603 
604 /* Filter edge predictions PREDS by a function FILTER.  DATA is passed
605    to the filter function.  */
606 
607 void
608 filter_predictions (edge_prediction **preds,
609 		    bool (*filter) (edge_prediction *, void *), void *data)
610 {
611   if (!bb_predictions)
612     return;
613 
614   if (preds)
615     {
616       struct edge_prediction **prediction = preds;
617       struct edge_prediction *next;
618 
619       while (*prediction)
620 	{
621 	  if ((*filter) (*prediction, data))
622 	    prediction = &((*prediction)->ep_next);
623 	  else
624 	    {
625 	      next = (*prediction)->ep_next;
626 	      free (*prediction);
627 	      *prediction = next;
628 	    }
629 	}
630     }
631 }
632 
633 /* Filter function predicate that returns true for an edge prediction P
634    if its edge is equal to DATA.  */
635 
636 bool
637 equal_edge_p (edge_prediction *p, void *data)
638 {
639   return p->ep_edge == (edge)data;
640 }
641 
642 /* Remove all predictions on given basic block that are attached
643    to edge E.  */
644 void
645 remove_predictions_associated_with_edge (edge e)
646 {
647   if (!bb_predictions)
648     return;
649 
650   edge_prediction **preds = bb_predictions->get (e->src);
651   filter_predictions (preds, equal_edge_p, e);
652 }
653 
654 /* Clears the list of predictions stored for BB.  */
655 
656 static void
657 clear_bb_predictions (basic_block bb)
658 {
659   edge_prediction **preds = bb_predictions->get (bb);
660   struct edge_prediction *pred, *next;
661 
662   if (!preds)
663     return;
664 
665   for (pred = *preds; pred; pred = next)
666     {
667       next = pred->ep_next;
668       free (pred);
669     }
670   *preds = NULL;
671 }
672 
673 /* Return true when we can store a prediction on insn INSN.
674    At the moment we represent predictions only on conditional
675    jumps, not on computed jumps or other complicated cases.  */
676 static bool
677 can_predict_insn_p (const rtx_insn *insn)
678 {
679   return (JUMP_P (insn)
680 	  && any_condjump_p (insn)
681 	  && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
682 }
683 
684 /* Predict edge E by given predictor if possible.  */
685 
686 void
687 predict_edge_def (edge e, enum br_predictor predictor,
688 		  enum prediction taken)
689 {
690    int probability = predictor_info[(int) predictor].hitrate;
691 
692    if (taken != TAKEN)
693      probability = REG_BR_PROB_BASE - probability;
694 
695    predict_edge (e, predictor, probability);
696 }
697 
698 /* Invert all branch predictions or probability notes in the INSN.  This needs
699    to be done each time we invert the condition used by the jump.  */
700 
701 void
702 invert_br_probabilities (rtx insn)
703 {
704   rtx note;
705 
706   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
707     if (REG_NOTE_KIND (note) == REG_BR_PROB)
708       XINT (note, 0) = profile_probability::from_reg_br_prob_note
709 			 (XINT (note, 0)).invert ().to_reg_br_prob_note ();
710     else if (REG_NOTE_KIND (note) == REG_BR_PRED)
711       XEXP (XEXP (note, 0), 1)
712 	= GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
713 }
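/* E.g. a REG_BR_PRED note with probability 9000 (90% with REG_BR_PROB_BASE
   of 10000) becomes 1000 (10%) after inversion, while REG_BR_PROB notes
   are inverted through the profile_probability representation.  */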
714 
715 /* Dump information about the branch prediction to the output file.  */
716 
717 static void
718 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
719 		 basic_block bb, enum predictor_reason reason = REASON_NONE,
720 		 edge ep_edge = NULL)
721 {
722   edge e = ep_edge;
723   edge_iterator ei;
724 
725   if (!file)
726     return;
727 
728   if (e == NULL)
729     FOR_EACH_EDGE (e, ei, bb->succs)
730       if (! (e->flags & EDGE_FALLTHRU))
731 	break;
732 
733   char edge_info_str[128];
734   if (ep_edge)
735     sprintf (edge_info_str, " of edge %d->%d", ep_edge->src->index,
736 	     ep_edge->dest->index);
737   else
738     edge_info_str[0] = '\0';
739 
740   fprintf (file, "  %s heuristics%s%s: %.2f%%",
741 	   predictor_info[predictor].name,
742 	   edge_info_str, reason_messages[reason],
743 	   probability * 100.0 / REG_BR_PROB_BASE);
744 
745   if (bb->count.initialized_p ())
746     {
747       fprintf (file, "  exec ");
748       bb->count.dump (file);
749       if (e)
750 	{
751 	  fprintf (file, " hit ");
752 	  e->count ().dump (file);
753 	  fprintf (file, " (%.1f%%)", e->count ().to_gcov_type() * 100.0
754 		   / bb->count.to_gcov_type ());
755 	}
756     }
757 
758   fprintf (file, "\n");
759 
760   /* Print output that can be easily read by the analyze_brprob.py script.
761      We are only interested in counts that are read from GCDA files.  */
762   if (dump_file && (dump_flags & TDF_DETAILS)
763       && bb->count.precise_p ()
764       && reason == REASON_NONE)
765     {
766       gcc_assert (e->count ().precise_p ());
767       fprintf (file, ";;heuristics;%s;%" PRId64 ";%" PRId64 ";%.1f;\n",
768 	       predictor_info[predictor].name,
769 	       bb->count.to_gcov_type (), e->count ().to_gcov_type (),
770 	       probability * 100.0 / REG_BR_PROB_BASE);
771     }
772 }
773 
774 /* Return true if STMT is known to be unlikely executed.  */
775 
776 static bool
777 unlikely_executed_stmt_p (gimple *stmt)
778 {
779   if (!is_gimple_call (stmt))
780     return false;
781   /* The NORETURN attribute alone is not strong enough: exit () may be quite
782      likely executed once during the program run.  */
783   if (gimple_call_fntype (stmt)
784       && lookup_attribute ("cold",
785 			   TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))
786       && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
787     return true;
788   tree decl = gimple_call_fndecl (stmt);
789   if (!decl)
790     return false;
791   if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl))
792       && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
793     return true;
794 
795   cgraph_node *n = cgraph_node::get (decl);
796   if (!n)
797     return false;
798 
799   availability avail;
800   n = n->ultimate_alias_target (&avail);
801   if (avail < AVAIL_AVAILABLE)
802     return false;
803   if (!n->analyzed
804       || n->decl == current_function_decl)
805     return false;
806   return n->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED;
807 }
808 
809 /* Return true if BB is unlikely executed.  */
810 
811 static bool
812 unlikely_executed_bb_p (basic_block bb)
813 {
814   if (bb->count == profile_count::zero ())
815     return true;
816   if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
817     return false;
818   for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
819        !gsi_end_p (gsi); gsi_next (&gsi))
820     {
821       if (unlikely_executed_stmt_p (gsi_stmt (gsi)))
822         return true;
823       if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
824 	return false;
825     }
826   return false;
827 }
828 
829 /* We cannot predict the probabilities of the outgoing edges of BB.  Set them
830    evenly and hope for the best.  If UNLIKELY_EDGES is not null, distribute
831    even probability to all edges not mentioned in the set; those edges
832    are given PROB_VERY_UNLIKELY probability.  Similarly for LIKELY_EDGES:
833    if we have exactly one likely edge, predict the other edges
834    as not probable.  */
835 
836 static void
837 set_even_probabilities (basic_block bb,
838 			hash_set<edge> *unlikely_edges = NULL,
839 			hash_set<edge_prediction *> *likely_edges = NULL)
840 {
841   unsigned nedges = 0, unlikely_count = 0;
842   edge e = NULL;
843   edge_iterator ei;
844   profile_probability all = profile_probability::always ();
845 
846   FOR_EACH_EDGE (e, ei, bb->succs)
847     if (e->probability.initialized_p ())
848       all -= e->probability;
849     else if (!unlikely_executed_edge_p (e))
850       {
851 	nedges++;
852         if (unlikely_edges != NULL && unlikely_edges->contains (e))
853 	  {
854 	    all -= profile_probability::very_unlikely ();
855 	    unlikely_count++;
856 	  }
857       }
858 
859   /* Make the distribution even if all edges are unlikely.  */
860   unsigned likely_count = likely_edges ? likely_edges->elements () : 0;
861   if (unlikely_count == nedges)
862     {
863       unlikely_edges = NULL;
864       unlikely_count = 0;
865     }
866 
867   /* If we have one likely edge, then use its probability and distribute
868      the remaining probability evenly.  */
869   if (likely_count == 1)
870     {
871       FOR_EACH_EDGE (e, ei, bb->succs)
872 	if (e->probability.initialized_p ())
873 	  ;
874 	else if (!unlikely_executed_edge_p (e))
875 	  {
876 	    edge_prediction *prediction = *likely_edges->begin ();
877 	    int p = prediction->ep_probability;
878 	    profile_probability prob
879 	      = profile_probability::from_reg_br_prob_base (p);
880 
881 	    if (prediction->ep_edge == e)
882 	      e->probability = prob;
883 	    else if (unlikely_edges != NULL && unlikely_edges->contains (e))
884 	      e->probability = profile_probability::very_unlikely ();
885 	    else
886 	      {
887 		profile_probability remainder = prob.invert ();
888 		remainder -= profile_probability::very_unlikely ()
889 		  .apply_scale (unlikely_count, 1);
890 		int count = nedges - unlikely_count - 1;
891 		gcc_assert (count >= 0);
892 
893 		e->probability = remainder.apply_scale (1, count);
894 	      }
895 	  }
896 	else
897 	  e->probability = profile_probability::never ();
898     }
899   else
900     {
901       /* Make all unlikely edges unlikely and the rest will have even
902 	 probability.  */
903       unsigned scale = nedges - unlikely_count;
904       FOR_EACH_EDGE (e, ei, bb->succs)
905 	if (e->probability.initialized_p ())
906 	  ;
907 	else if (!unlikely_executed_edge_p (e))
908 	  {
909 	    if (unlikely_edges != NULL && unlikely_edges->contains (e))
910 	      e->probability = profile_probability::very_unlikely ();
911 	    else
912 	      e->probability = all.apply_scale (1, scale);
913 	  }
914 	else
915 	  e->probability = profile_probability::never ();
916     }
917 }
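/* As a concrete example of the above: for a block with three successor
   edges of which one is in UNLIKELY_EDGES and LIKELY_EDGES is empty, the
   unlikely edge gets profile_probability::very_unlikely () and the two
   remaining edges split the leftover probability evenly between them.  */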
918 
919 /* Add REG_BR_PROB note to JUMP with PROB.  */
920 
921 void
922 add_reg_br_prob_note (rtx_insn *jump, profile_probability prob)
923 {
924   gcc_checking_assert (JUMP_P (jump) && !find_reg_note (jump, REG_BR_PROB, 0));
925   add_int_reg_note (jump, REG_BR_PROB, prob.to_reg_br_prob_note ());
926 }
927 
928 /* Combine all REG_BR_PRED notes into a single probability and attach a REG_BR_PROB
929    note if not already present.  Remove now useless REG_BR_PRED notes.  */
930 
931 static void
932 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
933 {
934   rtx prob_note;
935   rtx *pnote;
936   rtx note;
937   int best_probability = PROB_EVEN;
938   enum br_predictor best_predictor = END_PREDICTORS;
939   int combined_probability = REG_BR_PROB_BASE / 2;
940   int d;
941   bool first_match = false;
942   bool found = false;
943 
944   if (!can_predict_insn_p (insn))
945     {
946       set_even_probabilities (bb);
947       return;
948     }
949 
950   prob_note = find_reg_note (insn, REG_BR_PROB, 0);
951   pnote = &REG_NOTES (insn);
952   if (dump_file)
953     fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
954 	     bb->index);
955 
956   /* We implement the "first match" heuristics and use the probability
957      guessed by the predictor with the smallest index.  */
958   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
959     if (REG_NOTE_KIND (note) == REG_BR_PRED)
960       {
961 	enum br_predictor predictor = ((enum br_predictor)
962 				       INTVAL (XEXP (XEXP (note, 0), 0)));
963 	int probability = INTVAL (XEXP (XEXP (note, 0), 1));
964 
965 	found = true;
966 	if (best_predictor > predictor
967 	    && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
968 	  best_probability = probability, best_predictor = predictor;
969 
970 	d = (combined_probability * probability
971 	     + (REG_BR_PROB_BASE - combined_probability)
972 	     * (REG_BR_PROB_BASE - probability));
973 
974 	/* Use FP math to avoid overflows of 32-bit integers.  */
975 	if (d == 0)
976 	  /* If one probability is 0% and one 100%, avoid division by zero.  */
977 	  combined_probability = REG_BR_PROB_BASE / 2;
978 	else
979 	  combined_probability = (((double) combined_probability) * probability
980 				  * REG_BR_PROB_BASE / d + 0.5);
981       }
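  /* A worked example of the Dempster-Shafer combination above (with
     REG_BR_PROB_BASE of 10000): starting from an already-combined 8000 (80%)
     and adding a prediction of 7000 (70%) gives
     d = 8000*7000 + 2000*3000 = 62000000 and a combined probability of
     8000*7000*10000/62000000 + 0.5 ~= 9032, i.e. two agreeing predictors
     reinforce each other to roughly 90%.  */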
982 
983   /* Decide which heuristic to use.  If we didn't match anything,
984      use the no_prediction heuristic; if we did match, use either
985      first match or Dempster-Shafer theory depending on the flags.  */
986 
987   if (best_predictor != END_PREDICTORS)
988     first_match = true;
989 
990   if (!found)
991     dump_prediction (dump_file, PRED_NO_PREDICTION,
992 		     combined_probability, bb);
993   else
994     {
995       if (!first_match)
996 	dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
997 			 bb, !first_match ? REASON_NONE : REASON_IGNORED);
998       else
999 	dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
1000 			 bb, first_match ? REASON_NONE : REASON_IGNORED);
1001     }
1002 
1003   if (first_match)
1004     combined_probability = best_probability;
1005   dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1006 
1007   while (*pnote)
1008     {
1009       if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
1010 	{
1011 	  enum br_predictor predictor = ((enum br_predictor)
1012 					 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
1013 	  int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
1014 
1015 	  dump_prediction (dump_file, predictor, probability, bb,
1016 			   (!first_match || best_predictor == predictor)
1017 			   ? REASON_NONE : REASON_IGNORED);
1018 	  *pnote = XEXP (*pnote, 1);
1019 	}
1020       else
1021 	pnote = &XEXP (*pnote, 1);
1022     }
1023 
1024   if (!prob_note)
1025     {
1026       profile_probability p
1027 	 = profile_probability::from_reg_br_prob_base (combined_probability);
1028       add_reg_br_prob_note (insn, p);
1029 
1030       /* Save the prediction into the CFG in case we are seeing a
1031 	 non-degenerate conditional jump.  */
1032       if (!single_succ_p (bb))
1033 	{
1034 	  BRANCH_EDGE (bb)->probability = p;
1035 	  FALLTHRU_EDGE (bb)->probability
1036 	    = BRANCH_EDGE (bb)->probability.invert ();
1037 	}
1038     }
1039   else if (!single_succ_p (bb))
1040     {
1041       profile_probability prob = profile_probability::from_reg_br_prob_note
1042 					(XINT (prob_note, 0));
1043 
1044       BRANCH_EDGE (bb)->probability = prob;
1045       FALLTHRU_EDGE (bb)->probability = prob.invert ();
1046     }
1047   else
1048     single_succ_edge (bb)->probability = profile_probability::always ();
1049 }
1050 
1051 /* Edge prediction hash traits.  */
1052 
1053 struct predictor_hash: pointer_hash <edge_prediction>
1054 {
1055 
1056   static inline hashval_t hash (const edge_prediction *);
1057   static inline bool equal (const edge_prediction *, const edge_prediction *);
1058 };
1059 
1060 /* Calculate hash value of an edge prediction P based on predictor and
1061    normalized probability.  */
1062 
1063 inline hashval_t
1064 predictor_hash::hash (const edge_prediction *p)
1065 {
1066   inchash::hash hstate;
1067   hstate.add_int (p->ep_predictor);
1068 
1069   int prob = p->ep_probability;
1070   if (prob > REG_BR_PROB_BASE / 2)
1071     prob = REG_BR_PROB_BASE - prob;
1072 
1073   hstate.add_int (prob);
1074 
1075   return hstate.end ();
1076 }
1077 
1078 /* Return true when edge predictions P1 and P2 use the same predictor and
1079    have equal (or opposite) probabilities.  */
1080 
1081 inline bool
1082 predictor_hash::equal (const edge_prediction *p1, const edge_prediction *p2)
1083 {
1084   return (p1->ep_predictor == p2->ep_predictor
1085 	  && (p1->ep_probability == p2->ep_probability
1086 	      || p1->ep_probability == REG_BR_PROB_BASE - p2->ep_probability));
1087 }
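/* Note that, together with the hash function above, this makes a prediction
   of 9000 and a prediction of 1000 by the same predictor hash and compare
   equal (1000 == REG_BR_PROB_BASE - 9000 with a base of 10000), which is
   what lets prune_predictions_for_bb detect opposite predictions on the
   two outgoing edges.  */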
1088 
1089 struct predictor_hash_traits: predictor_hash,
1090   typed_noop_remove <edge_prediction *> {};
1091 
1092 /* Return true if edge prediction P is not in DATA hash set.  */
1093 
1094 static bool
1095 not_removed_prediction_p (edge_prediction *p, void *data)
1096 {
1097   hash_set<edge_prediction *> *remove = (hash_set<edge_prediction *> *) data;
1098   return !remove->contains (p);
1099 }
1100 
1101 /* Prune predictions for a basic block BB.  Currently we do the following
1102    clean-up steps:
1103 
1104    1) remove a duplicate prediction that is guessed with the same probability
1105       (other than 1/2) for both edges
1106    2) remove duplicates for a prediction that belongs with the same probability
1107       to a single edge
1108 
1109   */
1110 
1111 static void
1112 prune_predictions_for_bb (basic_block bb)
1113 {
1114   edge_prediction **preds = bb_predictions->get (bb);
1115 
1116   if (preds)
1117     {
1118       hash_table <predictor_hash_traits> s (13);
1119       hash_set <edge_prediction *> remove;
1120 
1121       /* Step 1: identify predictors that should be removed.  */
1122       for (edge_prediction *pred = *preds; pred; pred = pred->ep_next)
1123 	{
1124 	  edge_prediction *existing = s.find (pred);
1125 	  if (existing)
1126 	    {
1127 	      if (pred->ep_edge == existing->ep_edge
1128 		  && pred->ep_probability == existing->ep_probability)
1129 		{
1130 		  /* Remove a duplicate predictor.  */
1131 		  dump_prediction (dump_file, pred->ep_predictor,
1132 				   pred->ep_probability, bb,
1133 				   REASON_SINGLE_EDGE_DUPLICATE, pred->ep_edge);
1134 
1135 		  remove.add (pred);
1136 		}
1137 	      else if (pred->ep_edge != existing->ep_edge
1138 		       && pred->ep_probability == existing->ep_probability
1139 		       && pred->ep_probability != REG_BR_PROB_BASE / 2)
1140 		{
1141 		  /* Remove both predictors as they predict the same
1142 		     for both edges.  */
1143 		  dump_prediction (dump_file, existing->ep_predictor,
1144 				   pred->ep_probability, bb,
1145 				   REASON_EDGE_PAIR_DUPLICATE,
1146 				   existing->ep_edge);
1147 		  dump_prediction (dump_file, pred->ep_predictor,
1148 				   pred->ep_probability, bb,
1149 				   REASON_EDGE_PAIR_DUPLICATE,
1150 				   pred->ep_edge);
1151 
1152 		  remove.add (existing);
1153 		  remove.add (pred);
1154 		}
1155 	    }
1156 
1157 	  edge_prediction **slot2 = s.find_slot (pred, INSERT);
1158 	  *slot2 = pred;
1159 	}
1160 
1161       /* Step 2: Remove predictors.  */
1162       filter_predictions (preds, not_removed_prediction_p, &remove);
1163     }
1164 }
1165 
1166 /* Combine predictions into a single probability and store them into the CFG.
1167    Remove now useless prediction entries.
1168    If DRY_RUN is set, only produce dumps and do not modify profile.  */
1169 
1170 static void
1171 combine_predictions_for_bb (basic_block bb, bool dry_run)
1172 {
1173   int best_probability = PROB_EVEN;
1174   enum br_predictor best_predictor = END_PREDICTORS;
1175   int combined_probability = REG_BR_PROB_BASE / 2;
1176   int d;
1177   bool first_match = false;
1178   bool found = false;
1179   struct edge_prediction *pred;
1180   int nedges = 0;
1181   edge e, first = NULL, second = NULL;
1182   edge_iterator ei;
1183   int nzero = 0;
1184   int nunknown = 0;
1185 
1186   FOR_EACH_EDGE (e, ei, bb->succs)
1187     {
1188       if (!unlikely_executed_edge_p (e))
1189         {
1190 	  nedges ++;
1191 	  if (first && !second)
1192 	    second = e;
1193 	  if (!first)
1194 	    first = e;
1195         }
1196       else if (!e->probability.initialized_p ())
1197         e->probability = profile_probability::never ();
1198      if (!e->probability.initialized_p ())
1199         nunknown++;
1200      else if (e->probability == profile_probability::never ())
1201 	nzero++;
1202     }
1203 
1204   /* When there is no successor or only one choice, prediction is easy.
1205 
1206      When we have a basic block with more than 2 successors, the situation
1207      is more complicated as DS theory cannot be used literally.
1208      More precisely, let's assume we predicted edge e1 with probability p1,
1209      thus: m1({b1}) = p1.  As we're going to combine more than 2 edges, we
1210      need to find probability of e.g. m1({b2}), which we don't know.
1211      The only approximation is to equally distribute 1-p1 to all edges
1212      different from b1.
1213 
1214      According to numbers we've got from the SPEC2006 benchmark, there's only
1215      one interesting reliable predictor (noreturn call), which can be
1216      handled with a somewhat simpler approach.  */
1217   if (nedges != 2)
1218     {
1219       hash_set<edge> unlikely_edges (4);
1220       hash_set<edge_prediction *> likely_edges (4);
1221 
1222       /* Identify all edges that have a probability close to very unlikely.
1223 	 Doing the same for very likely edges is not worth it, as no such
1224 	 probability occurs in the SPEC2006 benchmark.  */
1225       edge_prediction **preds = bb_predictions->get (bb);
1226       if (preds)
1227 	for (pred = *preds; pred; pred = pred->ep_next)
1228 	  {
1229 	    if (pred->ep_probability <= PROB_VERY_UNLIKELY
1230 		|| pred->ep_predictor == PRED_COLD_LABEL)
1231 	      unlikely_edges.add (pred->ep_edge);
1232 	    else if (pred->ep_probability >= PROB_VERY_LIKELY
1233 		     || pred->ep_predictor == PRED_BUILTIN_EXPECT
1234 		     || pred->ep_predictor == PRED_HOT_LABEL)
1235 	      likely_edges.add (pred);
1236 	  }
1237 
1238       /* It can happen that an edge is both in likely_edges and unlikely_edges.
1239 	 Clear both sets in that situation.  */
1240       for (hash_set<edge_prediction *>::iterator it = likely_edges.begin ();
1241 	   it != likely_edges.end (); ++it)
1242 	if (unlikely_edges.contains ((*it)->ep_edge))
1243 	  {
1244 	    likely_edges.empty ();
1245 	    unlikely_edges.empty ();
1246 	    break;
1247 	  }
1248 
1249       if (!dry_run)
1250 	set_even_probabilities (bb, &unlikely_edges, &likely_edges);
1251       clear_bb_predictions (bb);
1252       if (dump_file)
1253 	{
1254 	  fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1255 	  if (unlikely_edges.elements () == 0)
1256 	    fprintf (dump_file,
1257 		     "%i edges in bb %i predicted to even probabilities\n",
1258 		     nedges, bb->index);
1259 	  else
1260 	    {
1261 	      fprintf (dump_file,
1262 		       "%i edges in bb %i predicted with some unlikely edges\n",
1263 		       nedges, bb->index);
1264 	      FOR_EACH_EDGE (e, ei, bb->succs)
1265 		if (!unlikely_executed_edge_p (e))
1266 		  dump_prediction (dump_file, PRED_COMBINED,
1267 		   e->probability.to_reg_br_prob_base (), bb, REASON_NONE, e);
1268 	    }
1269 	}
1270       return;
1271     }
1272 
1273   if (dump_file)
1274     fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1275 
1276   prune_predictions_for_bb (bb);
1277 
1278   edge_prediction **preds = bb_predictions->get (bb);
1279 
1280   if (preds)
1281     {
1282       /* We implement the "first match" heuristics and use the probability
1283 	 guessed by the predictor with the smallest index.  */
1284       for (pred = *preds; pred; pred = pred->ep_next)
1285 	{
1286 	  enum br_predictor predictor = pred->ep_predictor;
1287 	  int probability = pred->ep_probability;
1288 
1289 	  if (pred->ep_edge != first)
1290 	    probability = REG_BR_PROB_BASE - probability;
1291 
1292 	  found = true;
1293 	  /* First match heuristics would be wildly confused if we predicted
1294 	     both directions.  */
1295 	  if (best_predictor > predictor
1296 	    && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
1297 	    {
1298               struct edge_prediction *pred2;
1299 	      int prob = probability;
1300 
1301 	      for (pred2 = (struct edge_prediction *) *preds;
1302 		   pred2; pred2 = pred2->ep_next)
1303 	       if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
1304 	         {
1305 		   int probability2 = pred2->ep_probability;
1306 
1307 		   if (pred2->ep_edge != first)
1308 		     probability2 = REG_BR_PROB_BASE - probability2;
1309 
1310 		   if ((probability < REG_BR_PROB_BASE / 2) !=
1311 		       (probability2 < REG_BR_PROB_BASE / 2))
1312 		     break;
1313 
1314 		   /* If the same predictor later gave better result, go for it! */
1315 		   if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
1316 		       || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
1317 		     prob = probability2;
1318 		 }
1319 	      if (!pred2)
1320 	        best_probability = prob, best_predictor = predictor;
1321 	    }
1322 
1323 	  d = (combined_probability * probability
1324 	       + (REG_BR_PROB_BASE - combined_probability)
1325 	       * (REG_BR_PROB_BASE - probability));
1326 
1327 	  /* Use FP math to avoid overflows of 32-bit integers.  */
1328 	  if (d == 0)
1329 	    /* If one probability is 0% and one 100%, avoid division by zero.  */
1330 	    combined_probability = REG_BR_PROB_BASE / 2;
1331 	  else
1332 	    combined_probability = (((double) combined_probability)
1333 				    * probability
1334 		    		    * REG_BR_PROB_BASE / d + 0.5);
1335 	}
1336     }
1337 
1338   /* Decide which heuristic to use.  If we didn't match anything,
1339      use the no_prediction heuristic; if we did match, use either
1340      first match or Dempster-Shafer theory depending on the flags.  */
1341 
1342   if (best_predictor != END_PREDICTORS)
1343     first_match = true;
1344 
1345   if (!found)
1346     dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb);
1347   else
1348     {
1349       if (!first_match)
1350 	dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
1351 			 !first_match ? REASON_NONE : REASON_IGNORED);
1352       else
1353 	dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
1354 			 first_match ? REASON_NONE : REASON_IGNORED);
1355     }
1356 
1357   if (first_match)
1358     combined_probability = best_probability;
1359   dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1360 
1361   if (preds)
1362     {
1363       for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
1364 	{
1365 	  enum br_predictor predictor = pred->ep_predictor;
1366 	  int probability = pred->ep_probability;
1367 
1368 	  dump_prediction (dump_file, predictor, probability, bb,
1369 			   (!first_match || best_predictor == predictor)
1370 			   ? REASON_NONE : REASON_IGNORED, pred->ep_edge);
1371 	}
1372     }
1373   clear_bb_predictions (bb);
1374 
1375 
1376   /* If only one successor edge has unknown probability, we can compute the
1377      missing probability.  */
1378   if (nunknown == 1)
1379     {
1380       profile_probability prob = profile_probability::always ();
1381       edge missing = NULL;
1382 
1383       FOR_EACH_EDGE (e, ei, bb->succs)
1384 	if (e->probability.initialized_p ())
1385 	  prob -= e->probability;
1386 	else if (missing == NULL)
1387 	  missing = e;
1388 	else
1389 	  gcc_unreachable ();
1390        missing->probability = prob;
1391     }
1392   /* If nothing is unknown, we have nothing to update.  */
1393   else if (!nunknown && nzero != (int)EDGE_COUNT (bb->succs))
1394     ;
1395   else if (!dry_run)
1396     {
1397       first->probability
1398 	 = profile_probability::from_reg_br_prob_base (combined_probability);
1399       second->probability = first->probability.invert ();
1400     }
1401 }
1402 
1403 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
1404    Return the SSA_NAME if the condition is satisfied, NULL otherwise.
1405 
1406    T1 and T2 should be one of the following cases:
1407      1. T1 is SSA_NAME, T2 is NULL
1408      2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
1409      3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4]  */
1410 
1411 static tree
1412 strips_small_constant (tree t1, tree t2)
1413 {
1414   tree ret = NULL;
1415   int value = 0;
1416 
1417   if (!t1)
1418     return NULL;
1419   else if (TREE_CODE (t1) == SSA_NAME)
1420     ret = t1;
1421   else if (tree_fits_shwi_p (t1))
1422     value = tree_to_shwi (t1);
1423   else
1424     return NULL;
1425 
1426   if (!t2)
1427     return ret;
1428   else if (tree_fits_shwi_p (t2))
1429     value = tree_to_shwi (t2);
1430   else if (TREE_CODE (t2) == SSA_NAME)
1431     {
1432       if (ret)
1433         return NULL;
1434       else
1435         ret = t2;
1436     }
1437 
1438   if (value <= 4 && value >= -4)
1439     return ret;
1440   else
1441     return NULL;
1442 }
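/* For example, for the expression i_5 + 3 this is called with T1 = i_5 and
   T2 = 3 and returns i_5, while for i_5 + 8 it returns NULL because the
   constant lies outside [-4, 4].  Two SSA_NAMEs also yield NULL.  */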
1443 
1444 /* Return the SSA_NAME in T or T's operands.
1445    Return NULL if no SSA_NAME can be found.  */
1446 
1447 static tree
1448 get_base_value (tree t)
1449 {
1450   if (TREE_CODE (t) == SSA_NAME)
1451     return t;
1452 
1453   if (!BINARY_CLASS_P (t))
1454     return NULL;
1455 
1456   switch (TREE_OPERAND_LENGTH (t))
1457     {
1458     case 1:
1459       return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1460     case 2:
1461       return strips_small_constant (TREE_OPERAND (t, 0),
1462 				    TREE_OPERAND (t, 1));
1463     default:
1464       return NULL;
1465     }
1466 }
1467 
1468 /* Check the compare STMT in LOOP. If it compares an induction
1469    variable to a loop invariant, return true, and save
1470    LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
1471    Otherwise return false and set LOOP_INVARIANT to NULL.  */
1472 
1473 static bool
1474 is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
1475 				     tree *loop_invariant,
1476 				     enum tree_code *compare_code,
1477 				     tree *loop_step,
1478 				     tree *loop_iv_base)
1479 {
1480   tree op0, op1, bound, base;
1481   affine_iv iv0, iv1;
1482   enum tree_code code;
1483   tree step;
1484 
1485   code = gimple_cond_code (stmt);
1486   *loop_invariant = NULL;
1487 
1488   switch (code)
1489     {
1490     case GT_EXPR:
1491     case GE_EXPR:
1492     case NE_EXPR:
1493     case LT_EXPR:
1494     case LE_EXPR:
1495     case EQ_EXPR:
1496       break;
1497 
1498     default:
1499       return false;
1500     }
1501 
1502   op0 = gimple_cond_lhs (stmt);
1503   op1 = gimple_cond_rhs (stmt);
1504 
1505   if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1506        || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1507     return false;
1508   if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1509     return false;
1510   if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1511     return false;
1512   if (TREE_CODE (iv0.step) != INTEGER_CST
1513       || TREE_CODE (iv1.step) != INTEGER_CST)
1514     return false;
1515   if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1516       || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1517     return false;
1518 
1519   if (integer_zerop (iv0.step))
1520     {
1521       if (code != NE_EXPR && code != EQ_EXPR)
1522 	code = invert_tree_comparison (code, false);
1523       bound = iv0.base;
1524       base = iv1.base;
1525       if (tree_fits_shwi_p (iv1.step))
1526 	step = iv1.step;
1527       else
1528 	return false;
1529     }
1530   else
1531     {
1532       bound = iv1.base;
1533       base = iv0.base;
1534       if (tree_fits_shwi_p (iv0.step))
1535 	step = iv0.step;
1536       else
1537 	return false;
1538     }
1539 
1540   if (TREE_CODE (bound) != INTEGER_CST)
1541     bound = get_base_value (bound);
1542   if (!bound)
1543     return false;
1544   if (TREE_CODE (base) != INTEGER_CST)
1545     base = get_base_value (base);
1546   if (!base)
1547     return false;
1548 
1549   *loop_invariant = bound;
1550   *compare_code = code;
1551   *loop_step = step;
1552   *loop_iv_base = base;
1553   return true;
1554 }
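/* For instance, for a loop over i with step 1 and a condition i_1 < n_2
   where n_2 is loop invariant, this returns true with *LOOP_INVARIANT = n_2,
   *COMPARE_CODE = LT_EXPR, *LOOP_STEP = 1 and *LOOP_IV_BASE set to the base
   of i's induction variable (the SSA names here are illustrative).  */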
1555 
1556 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent.  */
1557 
1558 static bool
1559 expr_coherent_p (tree t1, tree t2)
1560 {
1561   gimple *stmt;
1562   tree ssa_name_1 = NULL;
1563   tree ssa_name_2 = NULL;
1564 
1565   gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1566   gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1567 
1568   if (t1 == t2)
1569     return true;
1570 
1571   if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1572     return true;
1573   if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1574     return false;
1575 
1576   /* Check to see if t1 is expressed/defined with t2.  */
1577   stmt = SSA_NAME_DEF_STMT (t1);
1578   gcc_assert (stmt != NULL);
1579   if (is_gimple_assign (stmt))
1580     {
1581       ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1582       if (ssa_name_1 && ssa_name_1 == t2)
1583 	return true;
1584     }
1585 
1586   /* Check to see if t2 is expressed/defined with t1.  */
1587   stmt = SSA_NAME_DEF_STMT (t2);
1588   gcc_assert (stmt != NULL);
1589   if (is_gimple_assign (stmt))
1590     {
1591       ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1592       if (ssa_name_2 && ssa_name_2 == t1)
1593 	return true;
1594     }
1595 
1596   /* Compare if t1 and t2's def_stmts are identical.  */
1597   if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1598     return true;
1599   else
1600     return false;
1601 }
1602 
1603 /* Return true if BB is predicted by one of the loop heuristics.  */
1604 
1605 static bool
1606 predicted_by_loop_heuristics_p (basic_block bb)
1607 {
1608   struct edge_prediction *i;
1609   edge_prediction **preds = bb_predictions->get (bb);
1610 
1611   if (!preds)
1612     return false;
1613 
1614   for (i = *preds; i; i = i->ep_next)
1615     if (i->ep_predictor == PRED_LOOP_ITERATIONS_GUESSED
1616 	|| i->ep_predictor == PRED_LOOP_ITERATIONS_MAX
1617 	|| i->ep_predictor == PRED_LOOP_ITERATIONS
1618 	|| i->ep_predictor == PRED_LOOP_EXIT
1619 	|| i->ep_predictor == PRED_LOOP_EXIT_WITH_RECURSION
1620 	|| i->ep_predictor == PRED_LOOP_EXTRA_EXIT)
1621       return true;
1622   return false;
1623 }
1624 
1625 /* Predict the branch probability of BB when BB contains a branch that compares
1626    an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR.  The
1627    loop exit is compared using LOOP_BOUND_CODE, with a step of LOOP_BOUND_STEP.
1628 
1629    E.g.
1630      for (int i = 0; i < bound; i++) {
1631        if (i < bound - 2)
1632 	 computation_1();
1633        else
1634 	 computation_2();
1635      }
1636 
1637   In this loop, we will predict the branch inside the loop to be taken.  */
1638 
1639 static void
1640 predict_iv_comparison (struct loop *loop, basic_block bb,
1641 		       tree loop_bound_var,
1642 		       tree loop_iv_base_var,
1643 		       enum tree_code loop_bound_code,
1644 		       int loop_bound_step)
1645 {
1646   gimple *stmt;
1647   tree compare_var, compare_base;
1648   enum tree_code compare_code;
1649   tree compare_step_var;
1650   edge then_edge;
1651   edge_iterator ei;
1652 
1653   if (predicted_by_loop_heuristics_p (bb))
1654     return;
1655 
1656   stmt = last_stmt (bb);
1657   if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1658     return;
1659   if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
1660 					    loop, &compare_var,
1661 					    &compare_code,
1662 					    &compare_step_var,
1663 					    &compare_base))
1664     return;
1665 
1666   /* Find the taken edge.  */
1667   FOR_EACH_EDGE (then_edge, ei, bb->succs)
1668     if (then_edge->flags & EDGE_TRUE_VALUE)
1669       break;
1670 
1671   /* When comparing an IV to a loop invariant, NE is more likely to be
1672      taken while EQ is more likely to be not-taken.  */
1673   if (compare_code == NE_EXPR)
1674     {
1675       predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1676       return;
1677     }
1678   else if (compare_code == EQ_EXPR)
1679     {
1680       predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1681       return;
1682     }
1683 
1684   if (!expr_coherent_p (loop_iv_base_var, compare_base))
1685     return;
1686 
1687   /* If loop bound, base and compare bound are all constants, we can
1688      calculate the probability directly.  */
  if (tree_fits_shwi_p (loop_bound_var)
      && tree_fits_shwi_p (compare_var)
      && tree_fits_shwi_p (compare_base))
    {
      int probability;
      wi::overflow_type overflow;
      bool overall_overflow = false;
      widest_int compare_count, tem;

      /* (loop_bound - base) / compare_step */
      tem = wi::sub (wi::to_widest (loop_bound_var),
		     wi::to_widest (compare_base), SIGNED, &overflow);
      overall_overflow |= overflow;
      widest_int loop_count = wi::div_trunc (tem,
					     wi::to_widest (compare_step_var),
					     SIGNED, &overflow);
      overall_overflow |= overflow;

      if (!wi::neg_p (wi::to_widest (compare_step_var))
          ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
	{
	  /* (loop_bound - compare_bound) / compare_step */
	  tem = wi::sub (wi::to_widest (loop_bound_var),
			 wi::to_widest (compare_var), SIGNED, &overflow);
	  overall_overflow |= overflow;
	  compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
					 SIGNED, &overflow);
	  overall_overflow |= overflow;
	}
      else
        {
	  /* (compare_bound - base) / compare_step */
	  tem = wi::sub (wi::to_widest (compare_var),
			 wi::to_widest (compare_base), SIGNED, &overflow);
	  overall_overflow |= overflow;
          compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
					 SIGNED, &overflow);
	  overall_overflow |= overflow;
	}
      if (compare_code == LE_EXPR || compare_code == GE_EXPR)
	++compare_count;
      if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
	++loop_count;
      if (wi::neg_p (compare_count))
        compare_count = 0;
      if (wi::neg_p (loop_count))
        loop_count = 0;
      if (loop_count == 0)
	probability = 0;
      else if (wi::cmps (compare_count, loop_count) == 1)
	probability = REG_BR_PROB_BASE;
      else
        {
	  tem = compare_count * REG_BR_PROB_BASE;
	  tem = wi::udiv_trunc (tem, loop_count);
	  probability = tem.to_uhwi ();
	}

      /* FIXME: The branch prediction seems broken.  It has only 20% hitrate.  */
      if (!overall_overflow)
        predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);

      return;
    }

  if (expr_coherent_p (loop_bound_var, compare_var))
    {
      if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
	  && (compare_code == LT_EXPR || compare_code == LE_EXPR))
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
      else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
	       && (compare_code == GT_EXPR || compare_code == GE_EXPR))
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
      else if (loop_bound_code == NE_EXPR)
	{
	  /* If the loop backedge condition is "(i != bound)", we do
	     the comparison based on the step of IV:
	     * step < 0 : backedge condition is like (i > bound)
	     * step > 0 : backedge condition is like (i < bound)  */
	  gcc_assert (loop_bound_step != 0);
	  if (loop_bound_step > 0
	      && (compare_code == LT_EXPR
		  || compare_code == LE_EXPR))
	    predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
	  else if (loop_bound_step < 0
		   && (compare_code == GT_EXPR
		       || compare_code == GE_EXPR))
	    predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
	  else
	    predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
	}
      else
	/* The branch is predicted not-taken if loop_bound_code is
	   opposite to compare_code.  */
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
    }
  else if (expr_coherent_p (loop_iv_base_var, compare_var))
    {
      /* For cases like:
	   for (i = s; i < h; i++)
	     if (i > s + 2) ....
	 The branch should be predicted taken.  */
      if (loop_bound_step > 0
	  && (compare_code == GT_EXPR || compare_code == GE_EXPR))
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
      else if (loop_bound_step < 0
	       && (compare_code == LT_EXPR || compare_code == LE_EXPR))
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
      else
	predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
    }
}

/* Predict for extra loop exits that will lead to EXIT_EDGE.  The extra loop
   exits result from short-circuit conditions that will generate an
   if_tmp.  E.g.:

   if (foo() || global > 10)
     break;

   This will be translated into:

   BB3:
     loop header...
   BB4:
     if foo() goto BB6 else goto BB5
   BB5:
     if global > 10 goto BB6 else goto BB7
   BB6:
     goto BB7
   BB7:
     iftmp = (PHI 0(BB5), 1(BB6))
     if iftmp == 1 goto BB8 else goto BB3
   BB8:
     outside of the loop...

   The edge BB7->BB8 is a loop exit because BB8 is outside of the loop.
   From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
   exits.  This function takes BB7->BB8 as input, and finds out the extra
   loop exits to predict them using PRED_LOOP_EXTRA_EXIT.  */

static void
predict_extra_loop_exits (edge exit_edge)
{
  unsigned i;
  bool check_value_one;
  gimple *lhs_def_stmt;
  gphi *phi_stmt;
  tree cmp_rhs, cmp_lhs;
  gimple *last;
  gcond *cmp_stmt;

  last = last_stmt (exit_edge->src);
  if (!last)
    return;
  cmp_stmt = dyn_cast <gcond *> (last);
  if (!cmp_stmt)
    return;

  cmp_rhs = gimple_cond_rhs (cmp_stmt);
  cmp_lhs = gimple_cond_lhs (cmp_stmt);
  if (!TREE_CONSTANT (cmp_rhs)
      || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
    return;
  if (TREE_CODE (cmp_lhs) != SSA_NAME)
    return;

  /* If check_value_one is true, only the phi_args with value '1' will lead
     to loop exit.  Otherwise, only the phi_args with value '0' will lead to
     loop exit.  */
  check_value_one = (((integer_onep (cmp_rhs))
		    ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
		    ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));

  lhs_def_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
  if (!lhs_def_stmt)
    return;

  phi_stmt = dyn_cast <gphi *> (lhs_def_stmt);
  if (!phi_stmt)
    return;

  for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
    {
      edge e1;
      edge_iterator ei;
      tree val = gimple_phi_arg_def (phi_stmt, i);
      edge e = gimple_phi_arg_edge (phi_stmt, i);

      if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
	continue;
      if ((check_value_one ^ integer_onep (val)) == 1)
	continue;
      if (EDGE_COUNT (e->src->succs) != 1)
	{
	  predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
	  continue;
	}

      FOR_EACH_EDGE (e1, ei, e->src->preds)
	predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
    }
}


/* Predict edge probabilities by exploiting loop structure.  */

static void
predict_loops (void)
{
  struct loop *loop;
  basic_block bb;
  hash_set <struct loop *> with_recursion(10);

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      tree decl;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	if (is_gimple_call (gsi_stmt (gsi))
	    && (decl = gimple_call_fndecl (gsi_stmt (gsi))) != NULL
	    && recursive_call_p (current_function_decl, decl))
	  {
	    loop = bb->loop_father;
	    while (loop && !with_recursion.add (loop))
	      loop = loop_outer (loop);
	  }
    }

  /* Try to predict out blocks in a loop that are not part of a
     natural loop.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      basic_block bb, *bbs;
      unsigned j, n_exits = 0;
      vec<edge> exits;
      struct tree_niter_desc niter_desc;
      edge ex;
      struct nb_iter_bound *nb_iter;
      enum tree_code loop_bound_code = ERROR_MARK;
      tree loop_bound_step = NULL;
      tree loop_bound_var = NULL;
      tree loop_iv_base = NULL;
      gcond *stmt = NULL;
      bool recursion = with_recursion.contains (loop);

      exits = get_loop_exit_edges (loop);
      FOR_EACH_VEC_ELT (exits, j, ex)
	if (!unlikely_executed_edge_p (ex) && !(ex->flags & EDGE_ABNORMAL_CALL))
	  n_exits++;
      if (!n_exits)
	{
          exits.release ();
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predicting loop %i%s with %i exits.\n",
		 loop->num, recursion ? " (with recursion)":"", n_exits);
      if (dump_file && (dump_flags & TDF_DETAILS)
	  && max_loop_iterations_int (loop) >= 0)
	{
	  fprintf (dump_file,
		   "Loop %d iterates at most %i times.\n", loop->num,
		   (int)max_loop_iterations_int (loop));
	}
      if (dump_file && (dump_flags & TDF_DETAILS)
	  && likely_max_loop_iterations_int (loop) >= 0)
	{
	  fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
		   loop->num, (int)likely_max_loop_iterations_int (loop));
	}

      FOR_EACH_VEC_ELT (exits, j, ex)
	{
	  tree niter = NULL;
	  HOST_WIDE_INT nitercst;
	  int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
	  int probability;
	  enum br_predictor predictor;
	  widest_int nit;

	  if (unlikely_executed_edge_p (ex)
	      || (ex->flags & EDGE_ABNORMAL_CALL))
	    continue;
	  /* Loop heuristics do not expect the exit conditional to be inside
	     an inner loop.  We predict from innermost to outermost loop.  */
	  if (predicted_by_loop_heuristics_p (ex->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Skipping exit %i->%i because "
			 "it is already predicted.\n",
			 ex->src->index, ex->dest->index);
	      continue;
	    }
	  predict_extra_loop_exits (ex);

	  if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
	    niter = niter_desc.niter;
	  if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
	    niter = loop_niter_by_eval (loop, ex);
	  if (dump_file && (dump_flags & TDF_DETAILS)
	      && TREE_CODE (niter) == INTEGER_CST)
	    {
	      fprintf (dump_file, "Exit %i->%i %d iterates ",
		       ex->src->index, ex->dest->index,
		       loop->num);
	      print_generic_expr (dump_file, niter, TDF_SLIM);
	      fprintf (dump_file, " times.\n");
	    }

	  if (TREE_CODE (niter) == INTEGER_CST)
	    {
	      if (tree_fits_uhwi_p (niter)
		  && max
		  && compare_tree_int (niter, max - 1) == -1)
		nitercst = tree_to_uhwi (niter) + 1;
	      else
		nitercst = max;
	      predictor = PRED_LOOP_ITERATIONS;
	    }
	  /* If we have just one exit and we can derive some information about
	     the number of iterations of the loop from the statements inside
	     the loop, use it to predict this exit.  */
	  else if (n_exits == 1
		   && estimated_stmt_executions (loop, &nit))
	    {
	      if (wi::gtu_p (nit, max))
		nitercst = max;
	      else
		nitercst = nit.to_shwi ();
	      predictor = PRED_LOOP_ITERATIONS_GUESSED;
	    }
	  /* If we have a likely upper bound, trust it for very small
	     iteration counts.  Such loops would otherwise get mispredicted
	     by the standard LOOP_EXIT heuristics.  */
	  else if (n_exits == 1
		   && likely_max_stmt_executions (loop, &nit)
		   && wi::ltu_p (nit,
				 RDIV (REG_BR_PROB_BASE,
				       REG_BR_PROB_BASE
					 - predictor_info
						 [recursion
						  ? PRED_LOOP_EXIT_WITH_RECURSION
						  : PRED_LOOP_EXIT].hitrate)))
	    {
	      nitercst = nit.to_shwi ();
	      predictor = PRED_LOOP_ITERATIONS_MAX;
	    }
	  else
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Nothing known about exit %i->%i.\n",
			 ex->src->index, ex->dest->index);
	      continue;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Recording prediction to %i iterations by %s.\n",
		     (int)nitercst, predictor_info[predictor].name);
	  /* If the prediction for number of iterations is zero, do not
	     predict the exit edges.  */
	  if (nitercst == 0)
	    continue;

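	  /* For instance (illustrative numbers): a loop predicted to
	     iterate 100 times gets an exit probability of
	     REG_BR_PROB_BASE / 100 per iteration, i.e. roughly 1%.  */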
	  probability = RDIV (REG_BR_PROB_BASE, nitercst);
	  predict_edge (ex, predictor, probability);
	}
      exits.release ();

      /* Find information about loop bound variables.  */
      for (nb_iter = loop->bounds; nb_iter;
	   nb_iter = nb_iter->next)
	if (nb_iter->stmt
	    && gimple_code (nb_iter->stmt) == GIMPLE_COND)
	  {
	    stmt = as_a <gcond *> (nb_iter->stmt);
	    break;
	  }
      if (!stmt && last_stmt (loop->header)
	  && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
	stmt = as_a <gcond *> (last_stmt (loop->header));
      if (stmt)
	is_comparison_with_loop_invariant_p (stmt, loop,
					     &loop_bound_var,
					     &loop_bound_code,
					     &loop_bound_step,
					     &loop_iv_base);

      bbs = get_loop_body (loop);

      for (j = 0; j < loop->num_nodes; j++)
	{
	  edge e;
	  edge_iterator ei;

	  bb = bbs[j];

	  /* Bypass loop heuristics on continue statement.  These
	     statements construct loops via "non-loop" constructs
	     in the source language and are better handled
	     separately.  */
	  if (predicted_by_p (bb, PRED_CONTINUE))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "BB %i predicted by continue.\n",
			 bb->index);
	      continue;
	    }

	  /* If we already used more reliable loop exit predictors, do not
	     bother with PRED_LOOP_EXIT.  */
	  if (!predicted_by_loop_heuristics_p (bb))
	    {
	      /* For a loop with many exits we don't want to predict all
	         exits with a pretty large probability, because if all exits
		 are considered in a row, the loop would be predicted to
		 iterate almost never.  The code to divide the probability
		 by the number of exits is very rough.  It should compute
		 the number of exits taken in each path through the function
		 (not the overall number of exits that might be a lot higher
		 for loops with wide switch statements in them) and compute
		 the n-th square root.

		 We limit the minimal probability to 2% to avoid
		 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
		 as this was causing a regression in the perl benchmark
		 containing such a wide loop.  */

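	      /* For example (with a hypothetical exit-predictor hitrate of
		 90%, i.e. 9000 of REG_BR_PROB_BASE): a loop with 4 exits
		 would give each exit (10000 - 9000) / 4 = 250, i.e. 2.5%,
		 just above the 2% floor applied below.  */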
	      int probability = ((REG_BR_PROB_BASE
		                  - predictor_info
				     [recursion
				      ? PRED_LOOP_EXIT_WITH_RECURSION
				      : PRED_LOOP_EXIT].hitrate)
				 / n_exits);
	      if (probability < HITRATE (2))
		probability = HITRATE (2);
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (e->dest->index < NUM_FIXED_BLOCKS
		    || !flow_bb_inside_loop_p (loop, e->dest))
		  {
		    if (dump_file && (dump_flags & TDF_DETAILS))
		      fprintf (dump_file,
			       "Predicting exit %i->%i with prob %i.\n",
			       e->src->index, e->dest->index, probability);
		    predict_edge (e,
				  recursion ? PRED_LOOP_EXIT_WITH_RECURSION
			          : PRED_LOOP_EXIT, probability);
		  }
	    }
	  if (loop_bound_var)
	    predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
				   loop_bound_code,
				   tree_to_shwi (loop_bound_step));
	}

      /* In the following code
	 for (loop1)
	   if (cond)
	     for (loop2)
	       body;
	 guess that cond is unlikely.  */
      if (loop_outer (loop)->num)
	{
	  basic_block bb = NULL;
	  edge preheader_edge = loop_preheader_edge (loop);

	  if (single_pred_p (preheader_edge->src)
	      && single_succ_p (preheader_edge->src))
	    preheader_edge = single_pred_edge (preheader_edge->src);

	  gimple *stmt = last_stmt (preheader_edge->src);
	  /* Pattern match the Fortran loop preheader:
	     _16 = BUILTIN_EXPECT (_15, 1, PRED_FORTRAN_LOOP_PREHEADER);
	     _17 = (logical(kind=4)) _16;
	     if (_17 != 0)
	       goto <bb 11>;
	     else
	       goto <bb 13>;

	     Loop guard branch prediction says nothing about duplicated loop
	     headers produced by the Fortran frontend, and in this case we
	     want to predict paths leading to this preheader.  */

	  if (stmt
	      && gimple_code (stmt) == GIMPLE_COND
	      && gimple_cond_code (stmt) == NE_EXPR
	      && TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
	      && integer_zerop (gimple_cond_rhs (stmt)))
	     {
	       gimple *call_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
	       if (gimple_code (call_stmt) == GIMPLE_ASSIGN
		   && gimple_expr_code (call_stmt) == NOP_EXPR
		   && TREE_CODE (gimple_assign_rhs1 (call_stmt)) == SSA_NAME)
		 call_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (call_stmt));
	       if (gimple_call_internal_p (call_stmt, IFN_BUILTIN_EXPECT)
		   && TREE_CODE (gimple_call_arg (call_stmt, 2)) == INTEGER_CST
		   && tree_fits_uhwi_p (gimple_call_arg (call_stmt, 2))
		   && tree_to_uhwi (gimple_call_arg (call_stmt, 2))
			== PRED_FORTRAN_LOOP_PREHEADER)
		 bb = preheader_edge->src;
	     }
	  if (!bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS,
				   loop_outer (loop)->latch, loop->header))
		predict_paths_leading_to_edge (loop_preheader_edge (loop),
					       recursion
					       ? PRED_LOOP_GUARD_WITH_RECURSION
					       : PRED_LOOP_GUARD,
					       NOT_TAKEN,
					       loop_outer (loop));
	    }
	  else
	    {
	      if (!dominated_by_p (CDI_DOMINATORS,
				   loop_outer (loop)->latch, bb))
		predict_paths_leading_to (bb,
					  recursion
					  ? PRED_LOOP_GUARD_WITH_RECURSION
					  : PRED_LOOP_GUARD,
					  NOT_TAKEN,
					  loop_outer (loop));
	    }
	}

      /* Free basic blocks from get_loop_body.  */
      free (bbs);
    }
}

/* Attempt to predict probabilities of BB outgoing edges using local
   properties.  */
static void
bb_estimate_probability_locally (basic_block bb)
{
  rtx_insn *last_insn = BB_END (bb);
  rtx cond;

  if (! can_predict_insn_p (last_insn))
    return;
  cond = get_condition (last_insn, NULL, false, false);
  if (! cond)
    return;

  /* Try "pointer heuristic."
     A comparison ptr == 0 is predicted as false.
     Similarly, a comparison ptr1 == ptr2 is predicted as false.  */
  if (COMPARISON_P (cond)
      && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
	  || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
    {
      if (GET_CODE (cond) == EQ)
	predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
      else if (GET_CODE (cond) == NE)
	predict_insn_def (last_insn, PRED_POINTER, TAKEN);
    }
  else

  /* Try "opcode heuristic."
     EQ tests are usually false and NE tests are usually true.  Also,
     most quantities are positive, so we can make the appropriate guesses
     about signed comparisons against zero.  */
    switch (GET_CODE (cond))
      {
      case CONST_INT:
	/* Unconditional branch.  */
	predict_insn_def (last_insn, PRED_UNCONDITIONAL,
			  cond == const0_rtx ? NOT_TAKEN : TAKEN);
	break;

      case EQ:
      case UNEQ:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of = tests in
	   FP code.  */
	if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (XEXP (cond, 1) == const0_rtx
		 || XEXP (cond, 0) == const0_rtx)
	  ;
	else
	  predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
	break;

      case NE:
      case LTGT:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of = tests in
	   FP code.  */
	if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (XEXP (cond, 1) == const0_rtx
		 || XEXP (cond, 0) == const0_rtx)
	  ;
	else
	  predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
	break;

      case ORDERED:
	predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
	break;

      case UNORDERED:
	predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
	break;

      case LE:
      case LT:
	if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
	    || XEXP (cond, 1) == constm1_rtx)
	  predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
	break;

      case GE:
      case GT:
	if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
	    || XEXP (cond, 1) == constm1_rtx)
	  predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
	break;

      default:
	break;
      }
}

/* Set edge->probability for each successor edge of BB.  */
void
guess_outgoing_edge_probabilities (basic_block bb)
{
  bb_estimate_probability_locally (bb);
  combine_predictions_for_insn (BB_END (bb), bb);
}

static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor,
				 HOST_WIDE_INT *probability);

/* Helper function for expr_expected_value.  */

static tree
expr_expected_value_1 (tree type, tree op0, enum tree_code code,
		       tree op1, bitmap visited, enum br_predictor *predictor,
		       HOST_WIDE_INT *probability)
{
  gimple *def;

  /* Reset returned probability value.  */
  *probability = -1;
  *predictor = PRED_UNCONDITIONAL;

  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (TREE_CONSTANT (op0))
	return op0;

      if (code == IMAGPART_EXPR)
	{
	  if (TREE_CODE (TREE_OPERAND (op0, 0)) == SSA_NAME)
	    {
	      def = SSA_NAME_DEF_STMT (TREE_OPERAND (op0, 0));
	      if (is_gimple_call (def)
		  && gimple_call_internal_p (def)
		  && (gimple_call_internal_fn (def)
		      == IFN_ATOMIC_COMPARE_EXCHANGE))
		{
		  /* Assume that any given atomic operation has low contention,
		     and thus the compare-and-swap operation succeeds.  */
		  *predictor = PRED_COMPARE_AND_SWAP;
		  return build_one_cst (TREE_TYPE (op0));
		}
	    }
	}

      if (code != SSA_NAME)
	return NULL_TREE;

      def = SSA_NAME_DEF_STMT (op0);

      /* If we were already here, break the infinite cycle.  */
      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
	return NULL;

      if (gimple_code (def) == GIMPLE_PHI)
	{
	  /* All the arguments of the PHI node must have the same constant
	     expected value.  */
	  int i, n = gimple_phi_num_args (def);
	  tree val = NULL, new_val;

	  for (i = 0; i < n; i++)
	    {
	      tree arg = PHI_ARG_DEF (def, i);
	      enum br_predictor predictor2;

	      /* If this PHI has itself as an argument, we cannot
		 determine the expected value of this argument.  However,
		 if we can find an expected constant value for the other
		 PHI args then we can still be sure that this is
		 likely a constant.  So be optimistic and just
		 continue with the next argument.  */
	      if (arg == PHI_RESULT (def))
		continue;

	      HOST_WIDE_INT probability2;
	      new_val = expr_expected_value (arg, visited, &predictor2,
					     &probability2);

	      /* It is difficult to combine value predictors.  Simply assume
		 that the later predictor is weaker and take its prediction.  */
	      if (*predictor < predictor2)
		{
		  *predictor = predictor2;
		  *probability = probability2;
		}
	      if (!new_val)
		return NULL;
	      if (!val)
		val = new_val;
	      else if (!operand_equal_p (val, new_val, false))
		return NULL;
	    }
	  return val;
	}
      if (is_gimple_assign (def))
	{
	  if (gimple_assign_lhs (def) != op0)
	    return NULL;

	  return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
					gimple_assign_rhs1 (def),
					gimple_assign_rhs_code (def),
					gimple_assign_rhs2 (def),
					visited, predictor, probability);
	}

      if (is_gimple_call (def))
	{
	  tree decl = gimple_call_fndecl (def);
	  if (!decl)
	    {
	      if (gimple_call_internal_p (def)
		  && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
		{
		  gcc_assert (gimple_call_num_args (def) == 3);
		  tree val = gimple_call_arg (def, 0);
		  if (TREE_CONSTANT (val))
		    return val;
		  tree val2 = gimple_call_arg (def, 2);
		  gcc_assert (TREE_CODE (val2) == INTEGER_CST
			      && tree_fits_uhwi_p (val2)
			      && tree_to_uhwi (val2) < END_PREDICTORS);
		  *predictor = (enum br_predictor) tree_to_uhwi (val2);
		  if (*predictor == PRED_BUILTIN_EXPECT)
		    *probability
		      = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
		  return gimple_call_arg (def, 1);
		}
	      return NULL;
	    }

	  if (DECL_IS_MALLOC (decl) || DECL_IS_OPERATOR_NEW (decl))
	    {
	      if (predictor)
		*predictor = PRED_MALLOC_NONNULL;
	      return boolean_true_node;
	    }

	  if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (decl))
	      {
	      case BUILT_IN_EXPECT:
		{
		  tree val;
		  if (gimple_call_num_args (def) != 2)
		    return NULL;
		  val = gimple_call_arg (def, 0);
		  if (TREE_CONSTANT (val))
		    return val;
		  *predictor = PRED_BUILTIN_EXPECT;
		  *probability
		    = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
		  return gimple_call_arg (def, 1);
		}
	      case BUILT_IN_EXPECT_WITH_PROBABILITY:
		{
		  tree val;
		  if (gimple_call_num_args (def) != 3)
		    return NULL;
		  val = gimple_call_arg (def, 0);
		  if (TREE_CONSTANT (val))
		    return val;
		  /* Compute the final probability as:
		     probability * REG_BR_PROB_BASE.  */
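		  /* E.g. (illustration): __builtin_expect_with_probability
		     (x, 1, 0.9) folds 0.9 * REG_BR_PROB_BASE below to the
		     REAL_CST 9000, so probi becomes 9000.  */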
		  tree prob = gimple_call_arg (def, 2);
		  tree t = TREE_TYPE (prob);
		  tree base = build_int_cst (integer_type_node,
					     REG_BR_PROB_BASE);
		  base = build_real_from_int_cst (t, base);
		  tree r = fold_build2_initializer_loc (UNKNOWN_LOCATION,
							MULT_EXPR, t, prob, base);
		  if (TREE_CODE (r) != REAL_CST)
		    {
		      error_at (gimple_location (def),
				"probability %qE must be "
				"constant floating-point expression", prob);
		      return NULL;
		    }
		  HOST_WIDE_INT probi
		    = real_to_integer (TREE_REAL_CST_PTR (r));
		  if (probi >= 0 && probi <= REG_BR_PROB_BASE)
		    {
		      *predictor = PRED_BUILTIN_EXPECT_WITH_PROBABILITY;
		      *probability = probi;
		    }
		  else
		    error_at (gimple_location (def),
			      "probability %qE is outside "
			      "the range [0.0, 1.0]", prob);

		  return gimple_call_arg (def, 1);
		}

	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
	      case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
	      case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
		/* Assume that any given atomic operation has low contention,
		   and thus the compare-and-swap operation succeeds.  */
		*predictor = PRED_COMPARE_AND_SWAP;
		return boolean_true_node;
	      case BUILT_IN_REALLOC:
		if (predictor)
		  *predictor = PRED_MALLOC_NONNULL;
		return boolean_true_node;
	      default:
		break;
	    }
	}

      return NULL;
    }

  if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      tree res;
      enum br_predictor predictor2;
      HOST_WIDE_INT probability2;
      op0 = expr_expected_value (op0, visited, predictor, probability);
      if (!op0)
	return NULL;
      op1 = expr_expected_value (op1, visited, &predictor2, &probability2);
      if (!op1)
	return NULL;
      res = fold_build2 (code, type, op0, op1);
      if (TREE_CODE (res) == INTEGER_CST
	  && TREE_CODE (op0) == INTEGER_CST
	  && TREE_CODE (op1) == INTEGER_CST)
	{
	  /* Combine binary predictions.  */
	  if (*probability != -1 || probability2 != -1)
	    {
	      HOST_WIDE_INT p1 = get_predictor_value (*predictor, *probability);
	      HOST_WIDE_INT p2 = get_predictor_value (predictor2, probability2);
	      *probability = RDIV (p1 * p2, REG_BR_PROB_BASE);
	    }

	  if (*predictor < predictor2)
	    *predictor = predictor2;

	  return res;
	}
      return NULL;
    }
  if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
    {
      tree res;
      op0 = expr_expected_value (op0, visited, predictor, probability);
      if (!op0)
	return NULL;
      res = fold_build1 (code, type, op0);
      if (TREE_CONSTANT (res))
	return res;
      return NULL;
    }
  return NULL;
}

/* Return the constant EXPR is likely to have at execution time, or NULL
   if unknown.  The function is used by the builtin_expect branch predictor,
   so the evidence must come from this construct and additional possible
   constant folding.

   We may want to implement a more involved value guess (such as value range
   propagation based prediction), but such tricks shall go to a new
   implementation.  */
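/* For example (illustration only): for the value _1 in
     _1 = __builtin_expect (n != 0, 1);
     if (_1 != 0) ...
   expr_expected_value returns the constant 1 with *PREDICTOR set to
   PRED_BUILTIN_EXPECT.  */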
static tree
expr_expected_value (tree expr, bitmap visited,
		     enum br_predictor *predictor,
		     HOST_WIDE_INT *probability)
{
  enum tree_code code;
  tree op0, op1;

  if (TREE_CONSTANT (expr))
    {
      *predictor = PRED_UNCONDITIONAL;
      *probability = -1;
      return expr;
    }

  extract_ops_from_tree (expr, &code, &op0, &op1);
  return expr_expected_value_1 (TREE_TYPE (expr),
				op0, code, op1, visited, predictor,
				probability);
}


/* Return the probability of a PREDICTOR.  If the predictor has variable
   probability, return the passed PROBABILITY.  */
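/* E.g. (illustration): get_predictor_value (PRED_BUILTIN_EXPECT, 9000)
   returns 9000, while a fixed-outcome predictor ignores the -1 passed as
   PROBABILITY and returns its hitrate from predict.def.  */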
static HOST_WIDE_INT
get_predictor_value (br_predictor predictor, HOST_WIDE_INT probability)
{
  switch (predictor)
    {
    case PRED_BUILTIN_EXPECT:
    case PRED_BUILTIN_EXPECT_WITH_PROBABILITY:
      gcc_assert (probability != -1);
      return probability;
    default:
      gcc_assert (probability == -1);
      return predictor_info[(int) predictor].hitrate;
    }
}

/* Predict using the opcode of the last statement in basic block.  */
static void
tree_predict_by_opcode (basic_block bb)
{
  gimple *stmt = last_stmt (bb);
  edge then_edge;
  tree op0, op1;
  tree type;
  tree val;
  enum tree_code cmp;
  edge_iterator ei;
  enum br_predictor predictor;
  HOST_WIDE_INT probability;

  if (!stmt)
    return;

  if (gswitch *sw = dyn_cast <gswitch *> (stmt))
    {
      tree index = gimple_switch_index (sw);
      tree val = expr_expected_value (index, auto_bitmap (),
				      &predictor, &probability);
      if (val && TREE_CODE (val) == INTEGER_CST)
	{
	  edge e = find_taken_edge_switch_expr (sw, val);
	  if (predictor == PRED_BUILTIN_EXPECT)
	    {
	      int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
	      gcc_assert (percent >= 0 && percent <= 100);
	      predict_edge (e, PRED_BUILTIN_EXPECT,
			    HITRATE (percent));
	    }
	  else
	    predict_edge_def (e, predictor, TAKEN);
	}
    }

  if (gimple_code (stmt) != GIMPLE_COND)
    return;
  FOR_EACH_EDGE (then_edge, ei, bb->succs)
    if (then_edge->flags & EDGE_TRUE_VALUE)
      break;
  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  cmp = gimple_cond_code (stmt);
  type = TREE_TYPE (op0);
  val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, auto_bitmap (),
			       &predictor, &probability);
  if (val && TREE_CODE (val) == INTEGER_CST)
    {
      HOST_WIDE_INT prob = get_predictor_value (predictor, probability);
      if (integer_zerop (val))
	prob = REG_BR_PROB_BASE - prob;
      predict_edge (then_edge, predictor, prob);
    }
  /* Try "pointer heuristic."
     A comparison ptr == 0 is predicted as false.
     Similarly, a comparison ptr1 == ptr2 is predicted as false.  */
  if (POINTER_TYPE_P (type))
    {
      if (cmp == EQ_EXPR)
	predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
      else if (cmp == NE_EXPR)
	predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
    }
  else

  /* Try "opcode heuristic."
     EQ tests are usually false and NE tests are usually true.  Also,
     most quantities are positive, so we can make the appropriate guesses
     about signed comparisons against zero.  */
    switch (cmp)
      {
      case EQ_EXPR:
      case UNEQ_EXPR:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of = tests in
	   FP code.  */
	if (FLOAT_TYPE_P (type))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (integer_zerop (op0) || integer_zerop (op1))
	  ;
	else
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
	break;

      case NE_EXPR:
      case LTGT_EXPR:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of = tests in
	   FP code.  */
	if (FLOAT_TYPE_P (type))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (integer_zerop (op0)
		 || integer_zerop (op1))
	  ;
	else
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
	break;

      case ORDERED_EXPR:
	predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
	break;

      case UNORDERED_EXPR:
	predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
	break;

      case LE_EXPR:
      case LT_EXPR:
	if (integer_zerop (op1)
	    || integer_onep (op1)
	    || integer_all_onesp (op1)
	    || real_zerop (op1)
	    || real_onep (op1)
	    || real_minus_onep (op1))
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
	break;

      case GE_EXPR:
      case GT_EXPR:
	if (integer_zerop (op1)
	    || integer_onep (op1)
	    || integer_all_onesp (op1)
	    || real_zerop (op1)
	    || real_onep (op1)
	    || real_minus_onep (op1))
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
	break;

      default:
	break;
      }
}

/* Returns TRUE if STMT is an exit(0)-like statement.  */

static bool
is_exit_with_zero_arg (const gimple *stmt)
{
  /* This is not exit, _exit or _Exit.  */
  if (!gimple_call_builtin_p (stmt, BUILT_IN_EXIT)
      && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT)
      && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT2))
    return false;

  /* The argument is an integer zero.  */
  return integer_zerop (gimple_call_arg (stmt, 0));
}

/* Try to guess whether the return value represents an error code.  */
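/* E.g. (illustration): "return NULL;" yields PRED_NULL_RETURN,
   "return -1;" yields PRED_NEGATIVE_RETURN, and "return 42;" yields
   PRED_CONST_RETURN, each with *PREDICTION set to NOT_TAKEN.  */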
static enum br_predictor
return_prediction (tree val, enum prediction *prediction)
{
  /* VOID.  */
  if (!val)
    return PRED_NO_PREDICTION;
  /* Different heuristics for pointers and scalars.  */
  if (POINTER_TYPE_P (TREE_TYPE (val)))
    {
      /* NULL is usually not returned.  */
      if (integer_zerop (val))
	{
	  *prediction = NOT_TAKEN;
	  return PRED_NULL_RETURN;
	}
    }
  else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      /* Negative return values are often used to indicate
         errors.  */
      if (TREE_CODE (val) == INTEGER_CST
	  && tree_int_cst_sgn (val) < 0)
	{
	  *prediction = NOT_TAKEN;
	  return PRED_NEGATIVE_RETURN;
	}
      /* Constant return values seem to be commonly taken.
         Zero/one often represent booleans, so exclude them from the
	 heuristics.  */
      if (TREE_CONSTANT (val)
	  && (!integer_zerop (val) && !integer_onep (val)))
	{
	  *prediction = NOT_TAKEN;
	  return PRED_CONST_RETURN;
	}
    }
  return PRED_NO_PREDICTION;
}

/* Return zero if the phi result could have values other than -1, 0 or 1,
   otherwise return a bitmask, with bits 0, 1 and 2 set if -1, 0 and 1
   values are used or likely.  */
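/* E.g. (illustration): a qsort-style comparator whose PHI merges the
   constants -1, 0 and 1 yields the bitmask 1 | 2 | 4 = 7; any other
   constant makes the function return 0.  */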
static int
zero_one_minusone (gphi *phi, int limit)
{
  int phi_num_args = gimple_phi_num_args (phi);
  int ret = 0;
  for (int i = 0; i < phi_num_args; i++)
    {
      tree t = PHI_ARG_DEF (phi, i);
      if (TREE_CODE (t) != INTEGER_CST)
	continue;
      wide_int w = wi::to_wide (t);
      if (w == -1)
	ret |= 1;
      else if (w == 0)
	ret |= 2;
      else if (w == 1)
	ret |= 4;
      else
	return 0;
    }
  for (int i = 0; i < phi_num_args; i++)
    {
      tree t = PHI_ARG_DEF (phi, i);
      if (TREE_CODE (t) == INTEGER_CST)
	continue;
      if (TREE_CODE (t) != SSA_NAME)
	return 0;
      gimple *g = SSA_NAME_DEF_STMT (t);
      if (gimple_code (g) == GIMPLE_PHI && limit > 0)
	if (int r = zero_one_minusone (as_a <gphi *> (g), limit - 1))
	  {
	    ret |= r;
	    continue;
	  }
      if (!is_gimple_assign (g))
	return 0;
      if (gimple_assign_cast_p (g))
	{
	  tree rhs1 = gimple_assign_rhs1 (g);
	  if (TREE_CODE (rhs1) != SSA_NAME
	      || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
	      || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
	    return 0;
	  ret |= (2 | 4);
	  continue;
	}
      if (TREE_CODE_CLASS (gimple_assign_rhs_code (g)) != tcc_comparison)
	return 0;
      ret |= (2 | 4);
    }
  return ret;
}

/* Find the basic block with the return expression and look for a possible
   return value, trying to apply the RETURN_PREDICTION heuristics.  */
static void
apply_return_prediction (void)
{
  greturn *return_stmt = NULL;
  tree return_val;
  edge e;
  gphi *phi;
  int phi_num_args, i;
  enum br_predictor pred;
  enum prediction direction;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    {
      gimple *last = last_stmt (e->src);
      if (last
	  && gimple_code (last) == GIMPLE_RETURN)
	{
	  return_stmt = as_a <greturn *> (last);
	  break;
	}
    }
  if (!e)
    return;
  return_val = gimple_return_retval (return_stmt);
  if (!return_val)
    return;
  if (TREE_CODE (return_val) != SSA_NAME
      || !SSA_NAME_DEF_STMT (return_val)
      || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
    return;
  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (return_val));
  phi_num_args = gimple_phi_num_args (phi);
  pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);

  /* Avoid the case where the function returns -1, 0 and 1 values and
     nothing else.  Those could be qsort etc. comparison functions
     where the negative return isn't less probable than positive.
     For this require that the function returns at least -1 or 1
     or -1 and a boolean value or comparison result, so that functions
     returning just -1 and 0 are treated as if -1 represents an error
     value.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (return_val))
      && !TYPE_UNSIGNED (TREE_TYPE (return_val))
      && TYPE_PRECISION (TREE_TYPE (return_val)) > 1)
    if (int r = zero_one_minusone (phi, 3))
      if ((r & (1 | 4)) == (1 | 4))
	return;

  /* Avoid the degenerate case where all return values from the function
     belong to the same category (i.e. they are all positive constants),
     so we can hardly say anything about them.  */
  for (i = 1; i < phi_num_args; i++)
    if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
      break;
  if (i != phi_num_args)
    for (i = 0; i < phi_num_args; i++)
      {
	pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
	if (pred != PRED_NO_PREDICTION)
	  predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
				         direction);
      }
}

/* Look for basic blocks that contain unlikely-to-happen events
   (such as noreturn calls) and mark all paths leading to execution
   of these basic blocks as unlikely.  */

static void
tree_bb_level_predictions (void)
{
  basic_block bb;
  bool has_return_edges = false;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (!unlikely_executed_edge_p (e) && !(e->flags & EDGE_ABNORMAL_CALL))
      {
        has_return_edges = true;
	break;
      }

  apply_return_prediction ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree decl;

	  if (is_gimple_call (stmt))
	    {
	      if (gimple_call_noreturn_p (stmt)
		  && has_return_edges
		  && !is_exit_with_zero_arg (stmt))
		predict_paths_leading_to (bb, PRED_NORETURN,
					  NOT_TAKEN);
	      decl = gimple_call_fndecl (stmt);
	      if (decl
		  && lookup_attribute ("cold",
				       DECL_ATTRIBUTES (decl)))
		predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
					  NOT_TAKEN);
	      if (decl && recursive_call_p (current_function_decl, decl))
		predict_paths_leading_to (bb, PRED_RECURSIVE_CALL,
					  NOT_TAKEN);
	    }
	  else if (gimple_code (stmt) == GIMPLE_PREDICT)
	    {
	      predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
					gimple_predict_outcome (stmt));
	      /* Keep GIMPLE_PREDICT around so early inlining will propagate
	         hints to callers.  */
	    }
	}
    }
}

/* Callback for hash_map::traverse, asserts that the pointer map is
   empty.  */

bool
assert_is_empty (const_basic_block const &, edge_prediction *const &value,
		 void *)
{
  gcc_assert (!value);
  return false;
}

/* Predict branch probabilities and estimate profile for basic block BB.
   When LOCAL_ONLY is set, do not use any global properties of the CFG.  */

static void
tree_estimate_probability_bb (basic_block bb, bool local_only)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* Look for the block we are guarding (i.e. we dominate it,
	 but it doesn't postdominate us).  */
      if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
	  && !local_only
	  && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
	  && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
	{
	  gimple_stmt_iterator bi;

	  /* The call heuristic claims that a guarded function call
	     is improbable.  This is because such calls are often used
	     to signal exceptional situations such as printing error
	     messages.  */
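	  /* E.g. (illustration): in "if (err) report_error (...);" the
	     guarded call makes the edge into the error-handling block
	     predicted not taken.  */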
	  for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
	       gsi_next (&bi))
	    {
	      gimple *stmt = gsi_stmt (bi);
	      if (is_gimple_call (stmt)
		  && !gimple_inexpensive_call_p (as_a <gcall *>  (stmt))
		  /* Constant and pure calls are hardly used to signal
		     something exceptional.  */
		  && gimple_has_side_effects (stmt))
		{
		  if (gimple_call_fndecl (stmt))
		    predict_edge_def (e, PRED_CALL, NOT_TAKEN);
		  else if (virtual_method_call_p (gimple_call_fn (stmt)))
		    predict_edge_def (e, PRED_POLYMORPHIC_CALL, NOT_TAKEN);
		  else
		    predict_edge_def (e, PRED_INDIR_CALL, TAKEN);
		  break;
		}
	    }
	}
    }
  tree_predict_by_opcode (bb);
}

/* Predict branch probabilities and estimate profile of the tree CFG.
   This function can be called from the loop optimizers to recompute
   the profile information.
   If DRY_RUN is set, do not modify the CFG and only produce dump files.  */

void
tree_estimate_probability (bool dry_run)
{
  basic_block bb;

  add_noreturn_fake_exit_edges ();
  connect_infinite_loops_to_exit ();
  /* We use loop_niter_by_eval, which requires that the loops have
     preheaders.  */
  create_preheaders (CP_SIMPLE_PREHEADERS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  /* Decide which edges are known to be unlikely.  This improves later
     branch prediction.  */
  determine_unlikely_bbs ();

  bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
  tree_bb_level_predictions ();
  record_loop_exits ();

  if (number_of_loops (cfun) > 1)
    predict_loops ();

  FOR_EACH_BB_FN (bb, cfun)
    tree_estimate_probability_bb (bb, false);

  FOR_EACH_BB_FN (bb, cfun)
    combine_predictions_for_bb (bb, dry_run);

  if (flag_checking)
    bb_predictions->traverse<void *, assert_is_empty> (NULL);

  delete bb_predictions;
  bb_predictions = NULL;

  if (!dry_run)
    estimate_bb_frequencies (false);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
}

/* Set edge->probability for each successor edge of BB.  */
void
tree_guess_outgoing_edge_probabilities (basic_block bb)
{
  bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
  tree_estimate_probability_bb (bb, true);
  combine_predictions_for_bb (bb, false);
  if (flag_checking)
    bb_predictions->traverse<void *, assert_is_empty> (NULL);
  delete bb_predictions;
  bb_predictions = NULL;
}

/* Predict edges to successors of CUR whose sources are not postdominated by
   BB by PRED and recurse to all postdominators.  */

static void
predict_paths_for_bb (basic_block cur, basic_block bb,
		      enum br_predictor pred,
		      enum prediction taken,
		      bitmap visited, struct loop *in_loop = NULL)
{
  edge e;
  edge_iterator ei;
  basic_block son;

  /* If we exited the loop or CUR is unconditional in the loop, there is
     nothing to do.  */
  if (in_loop
      && (!flow_bb_inside_loop_p (in_loop, cur)
	  || dominated_by_p (CDI_DOMINATORS, in_loop->latch, cur)))
    return;

  /* We are looking for all edges forming an edge cut induced by
     the set of all blocks postdominated by BB.  */
  FOR_EACH_EDGE (e, ei, cur->preds)
    if (e->src->index >= NUM_FIXED_BLOCKS
	&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
    {
      edge e2;
      edge_iterator ei2;
      bool found = false;

      /* Ignore fake edges and EH, we predict them as not taken anyway.  */
      if (unlikely_executed_edge_p (e))
	continue;
      gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));

      /* See if there is an edge from e->src that is not abnormal
	 and does not lead to BB and does not exit the loop.  */
      FOR_EACH_EDGE (e2, ei2, e->src->succs)
	if (e2 != e
	    && !unlikely_executed_edge_p (e2)
	    && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb)
	    && (!in_loop || !loop_exit_edge_p (in_loop, e2)))
	  {
	    found = true;
	    break;
	  }

      /* If there is a non-abnormal path leaving e->src, predict the edge
	 using the predictor.  Otherwise we need to look for paths
	 leading to e->src.

	 The second may lead to an infinite loop in the case we are
	 predicting regions that are only reachable by abnormal edges.
	 We simply prevent visiting a given BB twice.  */
      if (found)
	{
	  if (!edge_predicted_by_p (e, pred, taken))
            predict_edge_def (e, pred, taken);
	}
      else if (bitmap_set_bit (visited, e->src->index))
	predict_paths_for_bb (e->src, e->src, pred, taken, visited, in_loop);
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    predict_paths_for_bb (son, bb, pred, taken, visited, in_loop);
}

/* Set branch probabilities for all paths leading to BB according to
   predictor PRED and prediction TAKEN.  */

static void
predict_paths_leading_to (basic_block bb, enum br_predictor pred,
			  enum prediction taken, struct loop *in_loop)
{
  predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
}

/* Like predict_paths_leading_to but take an edge instead of a basic block.  */

static void
predict_paths_leading_to_edge (edge e, enum br_predictor pred,
			       enum prediction taken, struct loop *in_loop)
{
  bool has_nonloop_edge = false;
  edge_iterator ei;
  edge e2;

  basic_block bb = e->src;
  FOR_EACH_EDGE (e2, ei, bb->succs)
    if (e2->dest != e->src && e2->dest != e->dest
	&& !unlikely_executed_edge_p (e)
	&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
      {
	has_nonloop_edge = true;
	break;
      }
  if (!has_nonloop_edge)
    {
      predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
    }
  else
    predict_edge_def (e, pred, taken);
}

/* This is used to carry information about basic blocks.  It is
   attached to the AUX field of the standard CFG block.  */

struct block_info
{
  /* Estimated frequency of execution of basic_block.  */
  sreal frequency;

  /* To keep queue of basic blocks to process.  */
  basic_block next;

  /* Number of predecessors we need to visit first.  */
  int npredecessors;
};

/* Similar information for edges.  */
struct edge_prob_info
{
  /* In case the edge is a loopback edge, the probability that the edge
     will be reached in case the header is.  The estimated number of
     iterations of the loop can then be computed as
     1 / (1 - back_edge_prob).  */
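  /* E.g. (illustration): a back_edge_prob of 0.9 corresponds to an
     estimated 1 / (1 - 0.9) = 10 iterations.  */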
3257   sreal back_edge_prob;
3258   /* True if the edge is a loopback edge in the natural loop.  */
3259   unsigned int back_edge:1;
3260 };
3261 
3262 #define BLOCK_INFO(B)	((block_info *) (B)->aux)
3263 #undef EDGE_INFO
3264 #define EDGE_INFO(E)	((edge_prob_info *) (E)->aux)
3265 
3266 /* Helper function for estimate_bb_frequencies.
3267    Propagate the frequencies in blocks marked in
3268    TOVISIT, starting in HEAD.  */
3269 
3270 static void
propagate_freq(basic_block head,bitmap tovisit)3271 propagate_freq (basic_block head, bitmap tovisit)
3272 {
3273   basic_block bb;
3274   basic_block last;
3275   unsigned i;
3276   edge e;
3277   basic_block nextbb;
3278   bitmap_iterator bi;
3279 
3280   /* For each basic block we need to visit count number of his predecessors
3281      we need to visit first.  */
  EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
    {
      edge_iterator ei;
      int count = 0;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          bool visit = bitmap_bit_p (tovisit, e->src->index);

          if (visit && !(e->flags & EDGE_DFS_BACK))
            count++;
          else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
            fprintf (dump_file,
                     "Irreducible region hit, ignoring edge %i->%i\n",
                     e->src->index, bb->index);
        }
      BLOCK_INFO (bb)->npredecessors = count;
      /* When a function never returns, we will never process the exit
         block.  */
      if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
        bb->count = profile_count::zero ();
    }

  BLOCK_INFO (head)->frequency = 1;
  last = head;
  for (bb = head; bb; bb = nextbb)
    {
      edge_iterator ei;
      sreal cyclic_probability = 0;
      sreal frequency = 0;

      nextbb = BLOCK_INFO (bb)->next;
      BLOCK_INFO (bb)->next = NULL;

      /* Compute frequency of basic block.  */
      if (bb != head)
        {
          if (flag_checking)
            FOR_EACH_EDGE (e, ei, bb->preds)
              gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
                          || (e->flags & EDGE_DFS_BACK));

          FOR_EACH_EDGE (e, ei, bb->preds)
            if (EDGE_INFO (e)->back_edge)
              {
                cyclic_probability += EDGE_INFO (e)->back_edge_prob;
              }
            else if (!(e->flags & EDGE_DFS_BACK))
              {
                /*  frequency += (e->probability
                                  * BLOCK_INFO (e->src)->frequency /
                                  REG_BR_PROB_BASE);  */

                /* FIXME: Graphite is producing edges with no profile.  Once
                   this is fixed, drop this.  */
                sreal tmp = e->probability.initialized_p () ?
                            e->probability.to_reg_br_prob_base () : 0;
                tmp *= BLOCK_INFO (e->src)->frequency;
                tmp *= real_inv_br_prob_base;
                frequency += tmp;
              }

          if (cyclic_probability == 0)
            {
              BLOCK_INFO (bb)->frequency = frequency;
            }
          else
            {
              if (cyclic_probability > real_almost_one)
                cyclic_probability = real_almost_one;

              /* BLOCK_INFO (bb)->frequency = frequency
                                              / (1 - cyclic_probability) */
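              /* Illustrative example (made-up numbers): if the non-back
                 edges contribute frequency 1 and the back edge contributes
                 cyclic_probability 0.9, the header gets frequency
                 1 / (1 - 0.9) = 10, i.e. roughly ten executions of the
                 loop body per entry.  */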

              cyclic_probability = sreal (1) - cyclic_probability;
              BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
            }
        }

      bitmap_clear_bit (tovisit, bb->index);

      e = find_edge (bb, head);
      if (e)
        {
          /* EDGE_INFO (e)->back_edge_prob
             = ((e->probability * BLOCK_INFO (bb)->frequency)
             / REG_BR_PROB_BASE); */

          /* FIXME: Graphite is producing edges with no profile.  Once
             this is fixed, drop this.  */
          sreal tmp = e->probability.initialized_p () ?
                      e->probability.to_reg_br_prob_base () : 0;
          tmp *= BLOCK_INFO (bb)->frequency;
          EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
        }

      /* Propagate to successor blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!(e->flags & EDGE_DFS_BACK)
            && BLOCK_INFO (e->dest)->npredecessors)
          {
            BLOCK_INFO (e->dest)->npredecessors--;
            if (!BLOCK_INFO (e->dest)->npredecessors)
              {
                if (!nextbb)
                  nextbb = e->dest;
                else
                  BLOCK_INFO (last)->next = e->dest;

                last = e->dest;
              }
          }
    }
}

/* Estimate frequencies in loops at same nest level.  */

static void
estimate_loops_at_level (struct loop *first_loop)
{
  struct loop *loop;

  for (loop = first_loop; loop; loop = loop->next)
    {
      edge e;
      basic_block *bbs;
      unsigned i;
      auto_bitmap tovisit;

      estimate_loops_at_level (loop->inner);

      /* Find current loop back edge and mark it.  */
      e = loop_latch_edge (loop);
      EDGE_INFO (e)->back_edge = 1;

      bbs = get_loop_body (loop);
      for (i = 0; i < loop->num_nodes; i++)
        bitmap_set_bit (tovisit, bbs[i]->index);
      free (bbs);
      propagate_freq (loop->header, tovisit);
    }
}

/* Propagates frequencies through structure of loops.  */

static void
estimate_loops (void)
{
  auto_bitmap tovisit;
  basic_block bb;

  /* Start by estimating the frequencies in the loops.  */
  if (number_of_loops (cfun) > 1)
    estimate_loops_at_level (current_loops->tree_root->inner);

  /* Now propagate the frequencies through all the blocks.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      bitmap_set_bit (tovisit, bb->index);
    }
  propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
}

/* Drop the profile for NODE to guessed, and update its frequency based on
   whether it is expected to be hot given the CALL_COUNT.  */

static void
drop_profile (struct cgraph_node *node, profile_count call_count)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  /* In the case where this was called by another function with a
     dropped profile, call_count will be 0.  Since there are no
     non-zero call counts to this function, we don't know for sure
     whether it is hot, and therefore it will be marked normal below.  */
  bool hot = maybe_hot_count_p (NULL, call_count);

  if (dump_file)
    fprintf (dump_file,
             "Dropping 0 profile for %s. %s based on calls.\n",
             node->dump_name (),
             hot ? "Function is hot" : "Function is normal");
  /* We only expect to miss profiles for functions that are reached
     via non-zero call edges in cases where the function may have
     been linked from another module or library (COMDATs and extern
     templates).  See the comments below for handle_missing_profiles.
     Also, only warn in cases where the missing counts exceed the
     number of training runs.  In certain cases with an execv followed
     by a no-return call the profile for the no-return call is not
     dumped and there can be a mismatch.  */
  if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
      && call_count > profile_info->runs)
    {
      if (flag_profile_correction)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Missing counts for called function %s\n",
                     node->dump_name ());
        }
      else
        warning (0, "Missing counts for called function %s",
                 node->dump_name ());
    }

  basic_block bb;
  if (opt_for_fn (node->decl, flag_guess_branch_prob))
    {
      bool clear_zeros
         = !ENTRY_BLOCK_PTR_FOR_FN (fn)->count.nonzero_p ();
      FOR_ALL_BB_FN (bb, fn)
        if (clear_zeros || !(bb->count == profile_count::zero ()))
          bb->count = bb->count.guessed_local ();
      fn->cfg->count_max = fn->cfg->count_max.guessed_local ();
    }
  else
    {
      FOR_ALL_BB_FN (bb, fn)
        bb->count = profile_count::uninitialized ();
      fn->cfg->count_max = profile_count::uninitialized ();
    }

  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    e->count = gimple_bb (e->call_stmt)->count;
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = gimple_bb (e->call_stmt)->count;
  node->count = ENTRY_BLOCK_PTR_FOR_FN (fn)->count;

  profile_status_for_fn (fn)
      = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
  node->frequency
      = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
}

/* In the case of COMDAT routines, multiple object files will contain the same
   function and the linker will select one for the binary.  In that case
   all the other copies from the profile-instrumented binary will be missing
   profile counts.  Look for cases where this happened, due to non-zero
   call counts going to 0-count functions, and drop the profile to guessed
   so that we can use the estimated probabilities and avoid optimizing only
   for size.

   The other case where the profile may be missing is when the routine
   is not going to be emitted to the object file, e.g. for "extern template"
   class methods.  Those will be marked DECL_EXTERNAL.  Emit a warning in
   all other cases of non-zero calls to 0-count functions.  */

void
handle_missing_profiles (void)
{
  struct cgraph_node *node;
  int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
  auto_vec<struct cgraph_node *, 64> worklist;

  /* See if 0 count function has non-0 count callers.  In this case we
     lost some profile.  Drop its function profile to PROFILE_GUESSED.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      profile_count call_count = profile_count::zero ();
      gcov_type max_tp_first_run = 0;
      struct function *fn = DECL_STRUCT_FUNCTION (node->decl);

      if (node->count.ipa ().nonzero_p ())
        continue;
      for (e = node->callers; e; e = e->next_caller)
        if (e->count.ipa ().initialized_p () && e->count.ipa () > 0)
          {
            call_count = call_count + e->count.ipa ();

            if (e->caller->tp_first_run > max_tp_first_run)
              max_tp_first_run = e->caller->tp_first_run;
          }

      /* If the time profile is missing, assign the maximum that comes from
         the caller functions.  */
      if (!node->tp_first_run && max_tp_first_run)
        node->tp_first_run = max_tp_first_run + 1;

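      /* Illustrative note (assuming the default value 20 of
         --param unlikely-bb-count-fraction and 100 training runs):
         the test below drops the profile once the callers' counts
         sum to at least 100 / 20 = 5.  */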
      if (call_count > 0
          && fn && fn->cfg
          && (call_count.apply_scale (unlikely_count_fraction, 1)
              >= profile_info->runs))
        {
          drop_profile (node, call_count);
          worklist.safe_push (node);
        }
    }

  /* Propagate the profile dropping to other 0-count COMDATs that are
     potentially called by COMDATs we already dropped the profile on.  */
  while (worklist.length () > 0)
    {
      struct cgraph_edge *e;

      node = worklist.pop ();
      for (e = node->callees; e; e = e->next_callee)
        {
          struct cgraph_node *callee = e->callee;
          struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);

          if (!(e->count.ipa () == profile_count::zero ())
              && callee->count.ipa ().nonzero_p ())
            continue;
          if ((DECL_COMDAT (callee->decl) || DECL_EXTERNAL (callee->decl))
              && fn && fn->cfg
              && profile_status_for_fn (fn) == PROFILE_READ)
            {
              drop_profile (callee, profile_count::zero ());
              worklist.safe_push (callee);
            }
        }
    }
}

/* Compute the maximum basic block count in the function and store it in
   cfun->cfg->count_max.  Return true iff there is a nonzero IPA count.  */

bool
update_max_bb_count (void)
{
  profile_count true_count_max = profile_count::uninitialized ();
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    true_count_max = true_count_max.max (bb->count);

  cfun->cfg->count_max = true_count_max;

  return true_count_max.ipa ().nonzero_p ();
}

/* Return true if the function is likely to be expensive, so there is no point
   in optimizing the performance of the prologue and epilogue or doing
   inlining at the expense of code size growth.  THRESHOLD is the maximum
   average number of instructions the function may execute and still be
   considered not expensive.  */
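/* Illustrative example (made-up numbers): with an entry block count of 100
   and THRESHOLD of 50, the limit below is 100 * 50 = 5000; the function is
   considered expensive once the counts of its active instructions, summed
   over all basic blocks, exceed that limit.  */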

bool
expensive_function_p (int threshold)
{
  basic_block bb;

  /* If the profile was scaled in a way that makes the entry block have
     count 0, then the function is definitely taking a lot of time.  */
  if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.nonzero_p ())
    return true;

  profile_count limit = ENTRY_BLOCK_PTR_FOR_FN
                           (cfun)->count.apply_scale (threshold, 1);
  profile_count sum = profile_count::zero ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      if (!bb->count.initialized_p ())
        {
          if (dump_file)
            fprintf (dump_file, "Function is considered expensive because"
                     " count of bb %i is not initialized\n", bb->index);
          return true;
        }

      FOR_BB_INSNS (bb, insn)
        if (active_insn_p (insn))
          {
            sum += bb->count;
            if (sum > limit)
              return true;
          }
    }

  return false;
}

/* All basic blocks that are reachable only from unlikely basic blocks are
   unlikely.  */

void
propagate_unlikely_bbs_forward (void)
{
  auto_vec<basic_block, 64> worklist;
  basic_block bb;
  edge_iterator ei;
  edge e;

  if (!(ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ()))
    {
      ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = (void *)(size_t) 1;
      worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));

      while (worklist.length () > 0)
        {
          bb = worklist.pop ();
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!(e->count () == profile_count::zero ())
                && !(e->dest->count == profile_count::zero ())
                && !e->dest->aux)
              {
                e->dest->aux = (void *)(size_t) 1;
                worklist.safe_push (e->dest);
              }
        }
    }

  FOR_ALL_BB_FN (bb, cfun)
    {
      if (!bb->aux)
        {
          if (!(bb->count == profile_count::zero ())
              && (dump_file && (dump_flags & TDF_DETAILS)))
            fprintf (dump_file,
                     "Basic block %i is marked unlikely by forward prop\n",
                     bb->index);
          bb->count = profile_count::zero ();
        }
      else
        bb->aux = NULL;
    }
}

/* Determine basic blocks/edges that are known to be unlikely executed and set
   their counters to zero.
   This is done by first identifying obviously unlikely BBs/edges and then
   propagating in both directions.  */
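/* Illustrative sketch: a block ending in a call to abort () is locally
   unlikely; backward propagation then marks blocks all of whose successors
   are unlikely, and the forward pass marks blocks reachable only through
   unlikely edges, so whole cold regions collapse to count zero.  */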

static void
determine_unlikely_bbs ()
{
  basic_block bb;
  auto_vec<basic_block, 64> worklist;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (!(bb->count == profile_count::zero ())
          && unlikely_executed_bb_p (bb))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Basic block %i is locally unlikely\n",
                     bb->index);
          bb->count = profile_count::zero ();
        }

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!(e->probability == profile_probability::never ())
            && unlikely_executed_edge_p (e))
          {
            if (dump_file && (dump_flags & TDF_DETAILS))
              fprintf (dump_file, "Edge %i->%i is locally unlikely\n",
                       bb->index, e->dest->index);
            e->probability = profile_probability::never ();
          }

      gcc_checking_assert (!bb->aux);
    }
  propagate_unlikely_bbs_forward ();

  auto_vec<int, 64> nsuccs;
  nsuccs.safe_grow_cleared (last_basic_block_for_fn (cfun));
  FOR_ALL_BB_FN (bb, cfun)
    if (!(bb->count == profile_count::zero ())
        && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
      {
        nsuccs[bb->index] = 0;
        FOR_EACH_EDGE (e, ei, bb->succs)
          if (!(e->probability == profile_probability::never ())
              && !(e->dest->count == profile_count::zero ()))
            nsuccs[bb->index]++;
        if (!nsuccs[bb->index])
          worklist.safe_push (bb);
      }
  while (worklist.length () > 0)
    {
      bb = worklist.pop ();
      if (bb->count == profile_count::zero ())
        continue;
      if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
        {
          bool found = false;
          for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
               !gsi_end_p (gsi); gsi_next (&gsi))
            if (stmt_can_terminate_bb_p (gsi_stmt (gsi))
                /* stmt_can_terminate_bb_p special-cases noreturn calls
                   because it assumes that fake edges are created.  We want
                   to know that noreturn alone does not imply the BB is
                   unlikely.  */
                || (is_gimple_call (gsi_stmt (gsi))
                    && (gimple_call_flags (gsi_stmt (gsi)) & ECF_NORETURN)))
              {
                found = true;
                break;
              }
          if (found)
            continue;
        }
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Basic block %i is marked unlikely by backward prop\n",
                 bb->index);
      bb->count = profile_count::zero ();
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!(e->probability == profile_probability::never ()))
          {
            if (!(e->src->count == profile_count::zero ()))
              {
                gcc_checking_assert (nsuccs[e->src->index] > 0);
                nsuccs[e->src->index]--;
                if (!nsuccs[e->src->index])
                  worklist.safe_push (e->src);
              }
          }
    }
  /* Finally, all edges from non-0 regions to 0-count regions are
     unlikely.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      if (!(bb->count == profile_count::zero ()))
        FOR_EACH_EDGE (e, ei, bb->succs)
          if (!(e->probability == profile_probability::never ())
              && e->dest->count == profile_count::zero ())
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Edge %i->%i is unlikely because "
                         "it enters unlikely block\n",
                         bb->index, e->dest->index);
              e->probability = profile_probability::never ();
            }

      edge other = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->probability == profile_probability::never ())
          ;
        else if (other)
          {
            other = NULL;
            break;
          }
        else
          other = e;
      if (other
          && !(other->probability == profile_probability::always ()))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Edge %i->%i is locally likely\n",
                     bb->index, other->dest->index);
          other->probability = profile_probability::always ();
        }
    }
  if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ())
    cgraph_node::get (current_function_decl)->count = profile_count::zero ();
}

/* Estimate and propagate basic block frequencies using the given branch
   probabilities.  If FORCE is true, the frequencies are used to estimate
   the counts even when there are already non-zero profile counts.  */

void
estimate_bb_frequencies (bool force)
{
  basic_block bb;
  sreal freq_max;

  determine_unlikely_bbs ();

  if (force || profile_status_for_fn (cfun) != PROFILE_READ
      || !update_max_bb_count ())
    {
      static int real_values_initialized = 0;

      if (!real_values_initialized)
        {
          real_values_initialized = 1;
          real_br_prob_base = REG_BR_PROB_BASE;
          /* Scaling frequencies up to the maximal profile count may result
             in frequent overflows, especially when inlining loops.
             Small scaling results in unnecessary precision loss.  Stay in
             the half of the (exponential) range.  */
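          /* E.g. with profile_count::n_bits equal to 61 (its value at the
             time of writing) this makes real_bb_freq_max 2^30, leaving
             comparable exponent headroom above and below.  */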
          real_bb_freq_max = (uint64_t)1 << (profile_count::n_bits / 2);
          real_one_half = sreal (1, -1);
          real_inv_br_prob_base = sreal (1) / real_br_prob_base;
          real_almost_one = sreal (1) - real_inv_br_prob_base;
        }

      mark_dfs_back_edges ();

      single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
         profile_probability::always ();

      /* Set up block info for each basic block.  */
      alloc_aux_for_blocks (sizeof (block_info));
      alloc_aux_for_edges (sizeof (edge_prob_info));
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              /* FIXME: Graphite is producing edges with no profile.  Once
                 this is fixed, drop this.  */
              if (e->probability.initialized_p ())
                EDGE_INFO (e)->back_edge_prob
                   = e->probability.to_reg_br_prob_base ();
              else
                EDGE_INFO (e)->back_edge_prob = REG_BR_PROB_BASE / 2;
              EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
            }
        }

      /* First compute frequencies locally for each loop from innermost
         to outermost to examine frequencies for back edges.  */
      estimate_loops ();

      freq_max = 0;
      FOR_EACH_BB_FN (bb, cfun)
        if (freq_max < BLOCK_INFO (bb)->frequency)
          freq_max = BLOCK_INFO (bb)->frequency;

      freq_max = real_bb_freq_max / freq_max;
      if (freq_max < 16)
        freq_max = 16;
      profile_count ipa_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ();
      cfun->cfg->count_max = profile_count::uninitialized ();
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
        {
          sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
          profile_count count = profile_count::from_gcov_type (tmp.to_int ());

          /* If we have profile feedback in which this function was never
             executed, then preserve this info.  */
          if (!(bb->count == profile_count::zero ()))
            bb->count = count.guessed_local ().combine_with_ipa_count (ipa_count);
          cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
        }

      free_aux_for_blocks ();
      free_aux_for_edges ();
    }
  compute_function_frequency ();
}

/* Decide whether function is hot, cold or unlikely executed.  */
void
compute_function_frequency (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_node::get (current_function_decl);

  if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
      || MAIN_NAME_P (DECL_NAME (current_function_decl)))
    node->only_called_at_startup = true;
  if (DECL_STATIC_DESTRUCTOR (current_function_decl))
    node->only_called_at_exit = true;

  if (profile_status_for_fn (cfun) != PROFILE_READ)
    {
      int flags = flags_from_decl_or_type (current_function_decl);
      if ((ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa_p ()
           && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ()
              == profile_count::zero ())
          || lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
             != NULL)
        {
          node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
          warn_function_cold (current_function_decl);
        }
      else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
               != NULL)
        node->frequency = NODE_FREQUENCY_HOT;
      else if (flags & ECF_NORETURN)
        node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
        node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
               || DECL_STATIC_DESTRUCTOR (current_function_decl))
        node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      return;
    }

  node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
  warn_function_cold (current_function_decl);
  if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa () == profile_count::zero ())
    return;
  FOR_EACH_BB_FN (bb, cfun)
    {
      if (maybe_hot_bb_p (cfun, bb))
        {
          node->frequency = NODE_FREQUENCY_HOT;
          return;
        }
      if (!probably_never_executed_bb_p (cfun, bb))
        node->frequency = NODE_FREQUENCY_NORMAL;
    }
}

/* Build PREDICT_EXPR.  */
tree
build_predict_expr (enum br_predictor predictor, enum prediction taken)
{
  tree t = build1 (PREDICT_EXPR, void_type_node,
                   build_int_cst (integer_type_node, predictor));
  SET_PREDICT_EXPR_OUTCOME (t, taken);
  return t;
}

const char *
predictor_name (enum br_predictor predictor)
{
  return predictor_info[predictor].name;
}

/* Predict branch probabilities and estimate profile of the tree CFG.  */

namespace {

const pass_data pass_data_profile =
{
  GIMPLE_PASS, /* type */
  "profile_estimate", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_BRANCH_PROB, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_profile : public gimple_opt_pass
{
public:
  pass_profile (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_profile, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_guess_branch_prob; }
  virtual unsigned int execute (function *);

}; // class pass_profile

unsigned int
pass_profile::execute (function *fun)
{
  unsigned nb_loops;

  if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
    return 0;

  loop_optimizer_init (LOOPS_NORMAL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    flow_loops_dump (dump_file, NULL, 0);

  mark_irreducible_loops ();

  nb_loops = number_of_loops (fun);
  if (nb_loops > 1)
    scev_initialize ();

  tree_estimate_probability (false);

  if (nb_loops > 1)
    scev_finalize ();

  loop_optimizer_finalize ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    gimple_dump_cfg (dump_file, dump_flags);
  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    profile_status_for_fn (fun) = PROFILE_GUESSED;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct loop *loop;
      FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
        if (loop->header->count.initialized_p ())
          fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
                   loop->num,
                   (int)expected_loop_iterations_unbounded (loop));
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_profile (gcc::context *ctxt)
{
  return new pass_profile (ctxt);
}

/* Return true when PRED predictor should be removed after early
   tree passes.  Most of the predictors are beneficial to survive,
   as early inlining can also distribute them into callers' bodies.  */

static bool
strip_predictor_early (enum br_predictor pred)
{
  switch (pred)
    {
    case PRED_TREE_EARLY_RETURN:
      return true;
    default:
      return false;
    }
}

/* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
   we no longer need.  EARLY is set to true when called from early
   optimizations.  */
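/* For instance (an illustrative sketch, not taken from a testcase), in the
   late run
     x = __builtin_expect (y, 1);
   becomes the plain assignment
     x = y;
   while GIMPLE_PREDICT statements are simply removed, since the hints have
   already been folded into edge probabilities.  */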

unsigned int
strip_predict_hints (function *fun, bool early)
{
  basic_block bb;
  gimple *ass_stmt;
  tree var;
  bool changed = false;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator bi;
      for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
        {
          gimple *stmt = gsi_stmt (bi);

          if (gimple_code (stmt) == GIMPLE_PREDICT)
            {
              if (!early
                  || strip_predictor_early (gimple_predict_predictor (stmt)))
                {
                  gsi_remove (&bi, true);
                  changed = true;
                  continue;
                }
            }
          else if (is_gimple_call (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);

              if (!early
                  && ((fndecl != NULL_TREE
                       && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
                       && gimple_call_num_args (stmt) == 2)
                      || (fndecl != NULL_TREE
                          && fndecl_built_in_p (fndecl,
                                                BUILT_IN_EXPECT_WITH_PROBABILITY)
                          && gimple_call_num_args (stmt) == 3)
                      || (gimple_call_internal_p (stmt)
                          && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)))
                {
                  var = gimple_call_lhs (stmt);
                  changed = true;
                  if (var)
                    {
                      ass_stmt
                        = gimple_build_assign (var, gimple_call_arg (stmt, 0));
                      gsi_replace (&bi, ass_stmt, true);
                    }
                  else
                    {
                      gsi_remove (&bi, true);
                      continue;
                    }
                }
            }
          gsi_next (&bi);
        }
    }
  return changed ? TODO_cleanup_cfg : 0;
}

namespace {

const pass_data pass_data_strip_predict_hints =
{
  GIMPLE_PASS, /* type */
  "*strip_predict_hints", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_BRANCH_PROB, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_strip_predict_hints : public gimple_opt_pass
{
public:
  pass_strip_predict_hints (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      early_p = param;
    }

  virtual unsigned int execute (function *);

private:
  bool early_p;

}; // class pass_strip_predict_hints

unsigned int
pass_strip_predict_hints::execute (function *fun)
{
  return strip_predict_hints (fun, early_p);
}

} // anon namespace

gimple_opt_pass *
make_pass_strip_predict_hints (gcc::context *ctxt)
{
  return new pass_strip_predict_hints (ctxt);
}

/* Rebuild function frequencies.  Passes are in general expected to
   maintain the profile by hand, however in some cases this is not possible:
   for example, when inlining several functions with loops, frequencies might
   run out of scale and thus need to be recomputed.  */

void
rebuild_frequencies (void)
{
  timevar_push (TV_REBUILD_FREQUENCIES);

  /* When the max bb count in the function is small, there is a higher
     chance that there were truncation errors in the integer scaling
     of counts by inlining and other optimizations.  This could lead
     to incorrect classification of code as being cold when it isn't.
     In that case, force the estimation of bb counts/frequencies from the
     branch probabilities, rather than computing frequencies from counts,
     which may also lead to frequencies incorrectly reduced to 0.  There
     is less precision in the probabilities, so we only do this for small
     max counts.  */
  cfun->cfg->count_max = profile_count::uninitialized ();
  basic_block bb;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);

  if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
    {
      loop_optimizer_init (0);
      add_noreturn_fake_exit_edges ();
      mark_irreducible_loops ();
      connect_infinite_loops_to_exit ();
      estimate_bb_frequencies (true);
      remove_fake_exit_edges ();
      loop_optimizer_finalize ();
    }
  else if (profile_status_for_fn (cfun) == PROFILE_READ)
    update_max_bb_count ();
  else if (profile_status_for_fn (cfun) == PROFILE_ABSENT
           && !flag_guess_branch_prob)
    ;
  else
    gcc_unreachable ();
  timevar_pop (TV_REBUILD_FREQUENCIES);
}

/* Perform a dry run of the branch prediction pass and report a comparison of
   the predicted and real profile into the dump file.  */

void
report_predictor_hitrates (void)
{
  unsigned nb_loops;

  loop_optimizer_init (LOOPS_NORMAL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    flow_loops_dump (dump_file, NULL, 0);

  mark_irreducible_loops ();

  nb_loops = number_of_loops (cfun);
  if (nb_loops > 1)
    scev_initialize ();

  tree_estimate_probability (true);

  if (nb_loops > 1)
    scev_finalize ();

  loop_optimizer_finalize ();
}

/* Force edge E to be cold.
   If IMPOSSIBLE is true, force the edge to have count and probability 0;
   otherwise keep a low probability to represent a possible error in a guess.
   This is used e.g. when we predict a loop to likely iterate a given number
   of times but are not 100% sure.

   This function locally updates the profile without attempting to keep
   global consistency, which cannot be reached in full generality without a
   full profile rebuild from probabilities alone.  Doing so is not
   necessarily a good idea because frequencies and counts may be more
   realistic than probabilities.

   In some cases (such as for elimination of early exits during full loop
   unrolling) the caller can ensure that the profile will get consistent
   afterwards.  */

void
force_edge_cold (edge e, bool impossible)
{
  profile_count count_sum = profile_count::zero ();
  profile_probability prob_sum = profile_probability::never ();
  edge_iterator ei;
  edge e2;
  bool uninitialized_exit = false;

  /* When branch probability guesses are not known, then do nothing.  */
  if (!impossible && !e->count ().initialized_p ())
    return;

  profile_probability goal = (impossible ? profile_probability::never ()
                              : profile_probability::very_unlikely ());

  /* If the edge is already improbable or cold, just return.  */
  if (e->probability <= goal
      && (!impossible || e->count () == profile_count::zero ()))
    return;
  FOR_EACH_EDGE (e2, ei, e->src->succs)
    if (e2 != e)
      {
        if (e2->flags & EDGE_FAKE)
          continue;
        if (e2->count ().initialized_p ())
          count_sum += e2->count ();
        if (e2->probability.initialized_p ())
          prob_sum += e2->probability;
        else
          uninitialized_exit = true;
      }

  /* If we are not guessing profiles but have some other edges out,
     just assume the control flow goes elsewhere.  */
  if (uninitialized_exit)
    e->probability = goal;
  /* If there are other edges out of e->src, redistribute probability
     there.  */
  else if (prob_sum > profile_probability::never ())
    {
      if (!(e->probability < goal))
        e->probability = goal;

      profile_probability prob_comp = prob_sum / e->probability.invert ();
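      /* Worked example (made-up numbers): if E carried probability 0.4 and
         is forced to never (0.0) while the two remaining edges carried 0.3
         each, then prob_sum is 0.6, e->probability.invert () is 1.0 and
         prob_comp is 0.6, so each remaining edge ends up with
         0.3 / 0.6 = 0.5 and the outgoing probabilities again sum to 1.  */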

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Making edge %i->%i %s by redistributing "
                 "probability to other edges.\n",
                 e->src->index, e->dest->index,
                 impossible ? "impossible" : "cold");
      FOR_EACH_EDGE (e2, ei, e->src->succs)
        if (e2 != e)
          {
            e2->probability /= prob_comp;
          }
      if (current_ir_type () != IR_GIMPLE
          && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
        update_br_prob_note (e->src);
    }
  /* If all edges out of e->src are unlikely, the basic block itself
     is unlikely.  */
  else
    {
      if (prob_sum == profile_probability::never ())
        e->probability = profile_probability::always ();
      else
        {
          if (impossible)
            e->probability = profile_probability::never ();
          /* If BB has some edges out that are not impossible, we cannot
             assume that BB itself is.  */
          impossible = false;
        }
      if (current_ir_type () != IR_GIMPLE
          && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
        update_br_prob_note (e->src);
      if (e->src->count == profile_count::zero ())
        return;
      if (count_sum == profile_count::zero () && impossible)
        {
          bool found = false;
          if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
            ;
          else if (current_ir_type () == IR_GIMPLE)
            for (gimple_stmt_iterator gsi = gsi_start_bb (e->src);
                 !gsi_end_p (gsi); gsi_next (&gsi))
              {
                if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
                  {
                    found = true;
                    break;
                  }
              }
          /* FIXME: Implement RTL path.  */
          else
            found = true;
          if (!found)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         "Making bb %i impossible and dropping count to 0.\n",
                         e->src->index);
              e->src->count = profile_count::zero ();
              FOR_EACH_EDGE (e2, ei, e->src->preds)
                force_edge_cold (e2, impossible);
              return;
            }
        }

      /* If we did no adjustment, the source basic block has no likely edges
         leaving in the other direction.  In that case force that bb cold,
         too.  This in general is a difficult task to do, but handle the
         special case when BB has only one predecessor.  This is a common
         case when we are updating after loop transforms.  */
      if (!(prob_sum > profile_probability::never ())
          && count_sum == profile_count::zero ()
          && single_pred_p (e->src) && e->src->count.to_frequency (cfun)
             > (impossible ? 0 : 1))
        {
          int old_frequency = e->src->count.to_frequency (cfun);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Making bb %i %s.\n", e->src->index,
                     impossible ? "impossible" : "cold");
          int new_frequency = MIN (e->src->count.to_frequency (cfun),
                                   impossible ? 0 : 1);
          if (impossible)
            e->src->count = profile_count::zero ();
          else
            e->src->count = e->count ().apply_scale (new_frequency,
                                                     old_frequency);
          force_edge_cold (single_pred_edge (e->src), impossible);
        }
      else if (dump_file && (dump_flags & TDF_DETAILS)
               && maybe_hot_bb_p (cfun, e->src))
        fprintf (dump_file, "Giving up on making bb %i %s.\n", e->src->index,
                 impossible ? "impossible" : "cold");
    }
}

#if CHECKING_P

namespace selftest {

/* Test that the hitrates of the predictors defined in predict.def lie
   within the range [50, 100].  */

struct branch_predictor
{
  const char *name;
  int probability;
};

#define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) { NAME, HITRATE },

static void
test_prediction_value_range ()
{
  branch_predictor predictors[] = {
#include "predict.def"
    { NULL, PROB_UNINITIALIZED }
  };

  for (unsigned i = 0; predictors[i].name != NULL; i++)
    {
      if (predictors[i].probability == PROB_UNINITIALIZED)
        continue;

      unsigned p = 100 * predictors[i].probability / REG_BR_PROB_BASE;
      ASSERT_TRUE (p >= 50 && p <= 100);
    }
}

#undef DEF_PREDICTOR

/* Run all of the selftests within this file.  */

void
predict_c_tests ()
{
  test_prediction_value_range ();
}

} // namespace selftest
#endif /* CHECKING_P.  */