/* Profile counter container type.
   Copyright (C) 2017-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "profile-count.h"
#include "options.h"
#include "tree.h"
#include "basic-block.h"
#include "function.h"
#include "cfg.h"
#include "gimple.h"
#include "data-streamer.h"
#include "cgraph.h"
#include "wide-int.h"
#include "sreal.h"

/* Get a string describing QUALITY.  */

const char *
profile_quality_as_string (enum profile_quality quality)
{
  switch (quality)
    {
    default:
      gcc_unreachable ();
    case profile_uninitialized:
      return "uninitialized";
    case profile_guessed_local:
      return "guessed_local";
    case profile_guessed_global0:
      return "guessed_global0";
    case profile_guessed_global0adjusted:
      return "guessed_global0adjusted";
    case profile_guessed:
      return "guessed";
    case profile_afdo:
      return "afdo";
    case profile_adjusted:
      return "adjusted";
    case profile_precise:
      return "precise";
    }
}

/* Dump THIS to F.  */

void
profile_count::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    {
      fprintf (f, "%" PRId64, m_val);
      if (m_quality == profile_guessed_local)
	fprintf (f, " (estimated locally)");
      else if (m_quality == profile_guessed_global0)
	fprintf (f, " (estimated locally, globally 0)");
      else if (m_quality == profile_guessed_global0adjusted)
	fprintf (f, " (estimated locally, globally 0 adjusted)");
      else if (m_quality == profile_adjusted)
	fprintf (f, " (adjusted)");
      else if (m_quality == profile_afdo)
	fprintf (f, " (auto FDO)");
      else if (m_quality == profile_guessed)
	fprintf (f, " (guessed)");
      else if (m_quality == profile_precise)
	fprintf (f, " (precise)");
    }
}

/* Dump THIS to stderr.  */

void
profile_count::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */
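/* Illustrative example (added note, not from the original sources): counts
   10000 and 10050 are within the absolute slack of 100 and are treated as
   equal, while 20000 and 30000 fall outside the 99%..101% ratio band and
   are reported as different.  */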

bool
profile_count::differs_from_p (profile_count other) const
{
  gcc_checking_assert (compatible_p (other));
  if (!initialized_p () || !other.initialized_p ())
    return false;
  if ((uint64_t)m_val - (uint64_t)other.m_val < 100
      || (uint64_t)other.m_val - (uint64_t)m_val < 100)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}

/* Stream THIS from IB.  */

profile_count
profile_count::stream_in (struct lto_input_block *ib)
{
  profile_count ret;
  ret.m_val = streamer_read_gcov_count (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct output_block *ob)
{
  streamer_write_gcov_count (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct lto_output_stream *ob)
{
  streamer_write_gcov_count_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Dump THIS to F.  */

void
profile_probability::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    {
      /* Distinguish between 0.00 as a roundoff error and an actual 0.
	 Similarly for 1.  */
      if (m_val == 0)
        fprintf (f, "never");
      else if (m_val == max_probability)
        fprintf (f, "always");
      else
        fprintf (f, "%3.1f%%", (double)m_val * 100 / max_probability);
      if (m_quality == profile_adjusted)
	fprintf (f, " (adjusted)");
      else if (m_quality == profile_afdo)
	fprintf (f, " (auto FDO)");
      else if (m_quality == profile_guessed)
	fprintf (f, " (guessed)");
    }
}

/* Dump THIS to stderr.  */

void
profile_probability::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */

bool
profile_probability::differs_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  if ((uint64_t)m_val - (uint64_t)other.m_val < max_probability / 1000
      || (uint64_t)other.m_val - (uint64_t)m_val < max_probability / 1000)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}

/* Return true if THIS differs significantly from OTHER.  */
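/* Illustrative example (added note): probabilities of 10% and 70% are more
   than half of max_probability apart and therefore "differ a lot", while
   40% and 60% do not.  */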

bool
profile_probability::differs_lot_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  uint32_t d = m_val > other.m_val ? m_val - other.m_val : other.m_val - m_val;
  return d > max_probability / 2;
}

/* Stream THIS from IB.  */

profile_probability
profile_probability::stream_in (struct lto_input_block *ib)
{
  profile_probability ret;
  ret.m_val = streamer_read_uhwi (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct output_block *ob)
{
  streamer_write_uhwi (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct lto_output_stream *ob)
{
  streamer_write_uhwi_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Compute RES = (a*b + c/2)/c, capping the result on overflow, and return
   false if overflow happened.  */
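/* Illustrative example (added note): with a = 5, b = 3 and c = 2 the result
   is (5*3 + 1) / 2 = 8, i.e. the quotient is rounded to the nearest integer
   rather than truncated to 7.  */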

bool
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
  FIXED_WIDE_INT (128) tmp = a;
  wi::overflow_type overflow;
  tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
  gcc_checking_assert (!overflow);
  if (wi::fits_uhwi_p (tmp))
    {
      *res = tmp.to_uhwi ();
      return true;
    }
  *res = (uint64_t) -1;
  return false;
}

/* Return count as frequency within FUN scaled in range 0 to REG_FREQ_MAX.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_frequency (struct function *fun) const
{
  if (!initialized_p ())
    return BB_FREQ_MAX;
  if (*this == profile_count::zero ())
    return 0;
  STATIC_ASSERT (REG_BR_PROB_BASE == BB_FREQ_MAX);
  gcc_assert (fun->cfg->count_max.initialized_p ());
  profile_probability prob = probability_in (fun->cfg->count_max);
  if (!prob.initialized_p ())
    return REG_BR_PROB_BASE;
  return prob.to_reg_br_prob_base ();
}

/* Return count as frequency within FUN scaled in range 0 to CGRAPH_FREQ_MAX
   where CGRAPH_FREQ_BASE means that the count equals the entry block count.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_cgraph_frequency (profile_count entry_bb_count) const
{
  if (!initialized_p () || !entry_bb_count.initialized_p ())
    return CGRAPH_FREQ_BASE;
  if (*this == profile_count::zero ())
    return 0;
  gcc_checking_assert (entry_bb_count.initialized_p ());
  uint64_t scale;
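  /* Added note: when the entry count is zero, the denominator is clamped to
     1 and the numerator bumped by one so the division below stays well
     defined; the result is capped at CGRAPH_FREQ_MAX.  */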
  if (!safe_scale_64bit (!entry_bb_count.m_val ? m_val + 1 : m_val,
			 CGRAPH_FREQ_BASE, MAX (1, entry_bb_count.m_val), &scale))
    return CGRAPH_FREQ_MAX;
  return MIN (scale, CGRAPH_FREQ_MAX);
}

/* Return THIS/IN as sreal value.  */

sreal
profile_count::to_sreal_scale (profile_count in, bool *known) const
{
  if (!initialized_p () || !in.initialized_p ())
    {
      if (known)
	*known = false;
      return 1;
    }
  if (known)
    *known = true;
  if (*this == profile_count::zero ())
    return 0;

  if (!in.m_val)
    {
      if (!m_val)
	return 1;
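      /* Added note: IN is zero while THIS is not, so no exact ratio exists;
	 fall back to an arbitrarily large scale proportional to the raw
	 count.  */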
      return m_val * 4;
    }
  return (sreal)m_val / (sreal)in.m_val;
}

/* We want to scale profile across function boundary from NUM to DEN.
   Take care of the side case when DEN is zero.  We still want to behave
   sanely here, which means
     - scale to profile_count::zero () if NUM is profile_count::zero
     - do not affect anything if NUM == DEN
     - preserve counter value but adjust quality in other cases.  */

void
profile_count::adjust_for_ipa_scaling (profile_count *num,
				       profile_count *den)
{
  /* Scaling is no-op if NUM and DEN are the same.  */
  if (*num == *den)
    return;
  /* Scaling to zero is always zero.  */
  if (*num == profile_count::zero ())
    return;
  /* If den is non-zero we are safe.  */
  if (den->force_nonzero () == *den)
    return;
  /* Force both to non-zero so we do not push profiles to 0 when
     both num == 0 and den == 0.  */
  *den = den->force_nonzero ();
  *num = num->force_nonzero ();
}

/* THIS is a count of bb which is known to be executed IPA times.
   Combine this information into bb counter.  This means returning IPA
   if it is nonzero, not changing anything if IPA is uninitialized,
   and if IPA is zero, turning THIS into the corresponding local profile
   with global0.  */
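/* Added note: if IPA equals profile_count::zero () the result is the global0
   variant of THIS; any other zero-valued IPA yields the global0adjusted
   variant instead.  */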
profile_count
profile_count::combine_with_ipa_count (profile_count ipa)
{
  ipa = ipa.ipa ();
  if (ipa.nonzero_p ())
    return ipa;
  if (!ipa.initialized_p () || *this == profile_count::zero ())
    return *this;
  if (ipa == profile_count::zero ())
    return this->global0 ();
  return this->global0adjusted ();
}

/* The profiling runtime uses gcov_type, which is usually a 64bit integer.
   Conversions back and forth are used to read the coverage and get it
   into the internal representation.  */
profile_count
profile_count::from_gcov_type (gcov_type v)
{
  profile_count ret;
  gcc_checking_assert (v >= 0);
  if (dump_file && v >= (gcov_type)max_count)
    fprintf (dump_file,
	     "Capping gcov count %" PRId64 " to max_count %" PRId64 "\n",
	     (int64_t) v, (int64_t) max_count);
  ret.m_val = MIN (v, (gcov_type)max_count);
  ret.m_quality = profile_precise;
  return ret;
}


/* COUNT1 times the event happens with *THIS probability, COUNT2 times it
   happens with OTHER probability.  Return the probability that either *THIS
   or OTHER happens.  */
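/* Worked example (added note): if *THIS is 50% with COUNT1 = 30 and OTHER is
   100% with COUNT2 = 10, the combined probability is
   0.5 * 30/40 + 1.0 * 10/40 = 62.5%.  */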

profile_probability
profile_probability::combine_with_count (profile_count count1,
					 profile_probability other,
					 profile_count count2) const
{
  /* If the probabilities are the same, we are done.
     If the counts are nonzero, we can distribute accordingly.  In the
     remaining cases just average the values and hope for the best.  */
  if (*this == other || count1 == count2
      || (count2 == profile_count::zero ()
	  && !(count1 == profile_count::zero ())))
    return *this;
  if (count1 == profile_count::zero () && !(count2 == profile_count::zero ()))
    return other;
  else if (count1.nonzero_p () || count2.nonzero_p ())
    return *this * count1.probability_in (count1 + count2)
	   + other * count2.probability_in (count1 + count2);
  else
    return *this * profile_probability::even ()
	   + other * profile_probability::even ();
}