1# Copyright 2014 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import logging
6import optparse
7
8from telemetry import decorators
9from telemetry.internal import story_runner
10from telemetry.internal.util import command_line
11from telemetry.page import legacy_page_test
12from telemetry.story import expectations as expectations_module
13from telemetry.web_perf import story_test
14from telemetry.web_perf import timeline_based_measurement
15
# Re-export of decorators.Info so benchmark definitions can write
# @benchmark.Info(...) to attach metadata (emails, component, documentation
# links) that GetOwners/GetBugComponents/GetDocumentationLinks read back.
Info = decorators.Info

# TODO(crbug.com/859524): remove this once we update all the benchmarks in
# tools/perf to use Info decorator.
# Deprecated alias for Info kept for benchmarks not yet migrated.
Owner = decorators.Info # pylint: disable=invalid-name
21
22
class InvalidOptionsError(Exception):
  """Raised when a benchmark declares invalid default options."""
26
27
class Benchmark(command_line.Command):
  """Base class for a Telemetry benchmark.

  A benchmark packages a measurement and a PageSet together.
  Benchmarks default to using TBM unless you override the value of
  Benchmark.test, or override the CreatePageTest method.

  New benchmarks should override CreateStorySet.
  """
  # Mapping of parser option name -> default value; applied to the command
  # line parser by SetArgumentDefaults. Keys must match options the parser
  # already declares.
  options = {}
  # Legacy attribute naming a StorySet class, used by the default
  # CreateStorySet implementation.
  page_set = None
  # StoryTest or LegacyPageTest subclass used to measure each story.
  test = timeline_based_measurement.TimelineBasedMeasurement
  SUPPORTED_PLATFORMS = [expectations_module.ALL]
  SUPPORTED_PLATFORM_TAGS = []

  def __init__(self, max_failures=None):
    """Creates a new Benchmark.

    Args:
      max_failures: The number of story run's failures before bailing
          from executing subsequent page runs. If None, we never bail.
    """
    self._max_failures = max_failures
    # TODO: There should be an assertion here that checks that only one of
    # the following is true:
    # * It's a TBM benchmark, with CreateCoreTimelineBasedMeasurementOptions
    #   defined.
    # * It's a legacy benchmark, with either CreatePageTest defined or
    #   Benchmark.test set.
    # See https://github.com/catapult-project/catapult/issues/3708

  def CanRunOnPlatform(self, platform, finder_options):
    """Figures out if the benchmark is meant to support this platform.

    This is based on the SUPPORTED_PLATFORMS class member of the benchmark.

    This method should not be overridden or called outside of the Telemetry
    framework.

    Note that finder_options object in practice sometimes is actually not
    a BrowserFinderOptions object but a PossibleBrowser object.
    The key is that it can be passed to ShouldDisable, which only uses
    finder_options.browser_type, which is available on both PossibleBrowser
    and BrowserFinderOptions.
    """
    for p in self.SUPPORTED_PLATFORMS:
      # This is reusing StoryExpectation code, so it is a bit unintuitive. We
      # are trying to detect the opposite of the usual case in StoryExpectations
      # so we want to return True when ShouldDisable returns true, even though
      # we do not want to disable.
      if p.ShouldDisable(platform, finder_options):
        return True
    return False

  def Run(self, finder_options):
    """Do not override this method."""
    finder_options.target_platforms = self.GetSupportedPlatformNames(
        self.SUPPORTED_PLATFORMS)
    return story_runner.RunBenchmark(self, finder_options)

  @property
  def max_failures(self):
    """Maximum story failures tolerated before bailing; None never bails."""
    return self._max_failures

  @classmethod
  def Name(cls):
    """Returns the benchmark name in the form '<module>.<class>'."""
    return '%s.%s' % (cls.__module__.split('.')[-1], cls.__name__)

  @classmethod
  def AddCommandLineArgs(cls, parser):
    """Attaches this benchmark's option group to |parser|.

    Subclasses should override AddBenchmarkCommandLineArgs rather than this
    method. The group is only attached if it ends up with any options.
    """
    group = optparse.OptionGroup(parser, '%s test options' % cls.Name())
    cls.AddBenchmarkCommandLineArgs(group)
    if group.option_list:
      parser.add_option_group(group)

  @classmethod
  def AddBenchmarkCommandLineArgs(cls, group):
    """Hook for subclasses to add benchmark-specific options to |group|."""
    del group  # unused

  @classmethod
  def GetSupportedPlatformNames(cls, supported_platforms):
    """Returns the platform names supported by this benchmark.

    Returns:
      A frozenset of names of supported platforms. The supported platforms
      are strings that would match possible values from platform.GetOsName().
    """
    result = set()
    for p in supported_platforms:
      result.update(p.GetSupportedPlatformNames())
    return frozenset(result)

  @classmethod
  def SetArgumentDefaults(cls, parser):
    """Applies cls.options as default values on |parser|.

    Raises:
      InvalidOptionsError: If cls.options contains a key that does not
          correspond to an option already declared on the parser.
    """
    default_values = parser.get_default_values()
    invalid_options = [o for o in cls.options if not hasattr(default_values, o)]
    if invalid_options:
      raise InvalidOptionsError(
          'Invalid benchmark options: %s' % ', '.join(invalid_options))
    parser.set_defaults(**cls.options)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args):
    """Hook for subclasses to validate parsed command line arguments."""
    pass

  def CustomizeOptions(self, finder_options):
    """Add options that are required by this benchmark."""

  def GetBugComponents(self):
    """Return the benchmark's Monorail component as a string."""
    return decorators.GetComponent(self)

  def GetOwners(self):
    """Return the benchmark's owners' emails in a list."""
    return decorators.GetEmails(self)

  def GetDocumentationLinks(self):
    """Return the benchmark's documentation links.

    Returns:
      A list of [title, url] pairs. This is the form that allows Dashboard
      to display links properly.
    """
    links = []
    url = decorators.GetDocumentationLink(self)
    if url is not None:
      links.append(['Benchmark documentation link', url])
    return links

  def GetInfoBlurb(self):
    """Return any info blurb associated with the benchmark."""
    return decorators.GetInfoBlurb(self)

  def CreateCoreTimelineBasedMeasurementOptions(self):
    """Return the base TimelineBasedMeasurementOptions for this Benchmark.

    Additional chrome and atrace categories can be appended when running the
    benchmark with the --extra-chrome-categories and --extra-atrace-categories
    flags.

    Override this method to configure a TimelineBasedMeasurement benchmark. If
    this is not a TimelineBasedMeasurement benchmark, override CreatePageTest
    for PageTest tests. Do not override both methods.
    """
    return timeline_based_measurement.Options()

  def _GetTimelineBasedMeasurementOptions(self, options):
    """Return all timeline based measurements for the current benchmark run.

    This includes the benchmark-configured measurements in
    CreateCoreTimelineBasedMeasurementOptions as well as the user-flag-
    configured options from --extra-chrome-categories and
    --extra-atrace-categories.
    """
    tbm_options = self.CreateCoreTimelineBasedMeasurementOptions()
    if options and options.extra_chrome_categories:
      # If Chrome tracing categories for this benchmark are not already
      # enabled, there is probably a good reason why. Don't change whether
      # Chrome tracing is enabled.
      assert tbm_options.config.enable_chrome_trace, (
          'This benchmark does not support Chrome tracing.')
      tbm_options.config.chrome_trace_config.category_filter.AddFilterString(
          options.extra_chrome_categories)
    if options and options.extra_atrace_categories:
      # Many benchmarks on Android run without atrace by default. Hopefully the
      # user understands that atrace is only supported on Android when setting
      # this option.
      tbm_options.config.enable_atrace_trace = True

      categories = tbm_options.config.atrace_config.categories
      # basestring only exists on Python 2 (it covers both str and unicode
      # there); fall back to str so this code also runs under Python 3, where
      # referencing basestring raises NameError.
      try:
        string_types = basestring  # pylint: disable=undefined-variable
      except NameError:
        string_types = str
      if isinstance(categories, string_types):
        # Categories can either be a list or comma-separated string.
        # https://github.com/catapult-project/catapult/issues/3712
        categories = categories.split(',')
      for category in options.extra_atrace_categories.split(','):
        if category not in categories:
          categories.append(category)
      tbm_options.config.atrace_config.categories = categories
    if options and options.enable_systrace:
      tbm_options.config.chrome_trace_config.SetEnableSystrace()
    legacy_json_format = options and options.legacy_json_trace_format
    if legacy_json_format:
      tbm_options.config.chrome_trace_config.SetJsonTraceFormat()
    else:
      tbm_options.config.chrome_trace_config.SetProtoTraceFormat()
    if options and options.experimental_system_tracing:
      # Experimental system tracing is incompatible with the legacy JSON
      # trace format (see the asserts here and below).
      assert not legacy_json_format
      logging.warning('Enabling experimental system tracing!')
      tbm_options.config.enable_experimental_system_tracing = True
      tbm_options.config.system_trace_config.EnableChrome(
          chrome_trace_config=tbm_options.config.chrome_trace_config)
    if options and options.experimental_system_data_sources:
      assert not legacy_json_format
      tbm_options.config.enable_experimental_system_tracing = True
      tbm_options.config.system_trace_config.EnablePower()
      tbm_options.config.system_trace_config.EnableSysStatsCpu()
      tbm_options.config.system_trace_config.EnableFtraceCpu()
      tbm_options.config.system_trace_config.EnableFtraceSched()

    if options and options.force_sideload_perfetto:
      # Sideloading perfetto only applies when system tracing is enabled.
      assert tbm_options.config.enable_experimental_system_tracing
      tbm_options.config.force_sideload_perfetto = True

    # TODO(crbug.com/1012687): Remove or adjust the following warnings as the
    # development of TBMv3 progresses.
    tbmv3_metrics = [m[6:] for m in tbm_options.GetTimelineBasedMetrics()
                     if m.startswith('tbmv3:')]
    if tbmv3_metrics:
      if legacy_json_format:
        logging.warning(
            'Selected TBMv3 metrics will not be computed because they are not '
            "supported in Chrome's JSON trace format.")
      else:
        logging.warning(
            'The following TBMv3 metrics have been selected to run: %s. '
            'Please note that TBMv3 is an experimental feature in active '
            'development, and may not be supported in the future in its '
            'current form. Follow crbug.com/1012687 for updates and to '
            'discuss your use case before deciding to rely on this feature.',
            ', '.join(tbmv3_metrics))
    return tbm_options

  def CreatePageTest(self, options):  # pylint: disable=unused-argument
    """Return the PageTest for this Benchmark.

    Override this method for PageTest tests.
    Override CreateCoreTimelineBasedMeasurementOptions to configure
    TimelineBasedMeasurement tests. Do not override both methods.

    Args:
      options: a browser_options.BrowserFinderOptions instance
    Returns:
      |test()| if |test| is a PageTest class.
      Otherwise, a TimelineBasedMeasurement instance.
    """
    is_page_test = issubclass(self.test, legacy_page_test.LegacyPageTest)
    is_story_test = issubclass(self.test, story_test.StoryTest)
    if not is_page_test and not is_story_test:
      raise TypeError('"%s" is not a PageTest or a StoryTest.' %
                      self.test.__name__)
    if is_page_test:
      # TODO: assert that CreateCoreTimelineBasedMeasurementOptions is not
      # defined. That's incorrect for a page test. See
      # https://github.com/catapult-project/catapult/issues/3708
      return self.test()  # pylint: disable=no-value-for-parameter

    opts = self._GetTimelineBasedMeasurementOptions(options)
    return self.test(opts)

  def CreateStorySet(self, options):
    """Creates the instance of StorySet used to run the benchmark.

    Can be overridden by subclasses.
    """
    del options  # unused
    # TODO(aiolos, nednguyen, eakufner): replace class attribute page_set with
    # story_set.
    if not self.page_set:
      raise NotImplementedError('This test has no "page_set" attribute.')
    return self.page_set()  # pylint: disable=not-callable
288