# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Records WPR (Web Page Replay) archives for a benchmark or a story set."""

import argparse
import logging
import shutil
import sys
import tempfile

from telemetry import benchmark
from telemetry import story
from telemetry.internal.browser import browser_options
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
from telemetry.internal.util import binary_manager
from telemetry.page import legacy_page_test
from telemetry.util import matching
from telemetry.util import wpr_modes

from py_utils import discover
import py_utils

DEFAULT_LOG_FORMAT = (
    '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
    '%(message)s')


class RecorderPageTest(legacy_page_test.LegacyPageTest):
  """Page test that wraps another page test while recording a WPR archive.

  Delegates the usual LegacyPageTest hooks to |page_test| (when provided)
  and forces every page load to fetch from the live network so that the
  recording captures all resources.
  """

  def __init__(self, page_test):
    super(RecorderPageTest, self).__init__()
    self._page_test = page_test
    # OS name of the browser used for recording; set in WillStartBrowser.
    self._platform = None

  @property
  def platform(self):
    return self._platform

  def CustomizeBrowserOptions(self, options):
    if self._page_test:
      self._page_test.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, browser):
    # Every browser used during a single recording must run on the same OS.
    if self.platform is not None:
      assert browser.GetOSName() == self.platform
    self._platform = browser.GetOSName()  # Record platform name from browser.
    if self._page_test:
      self._page_test.WillStartBrowser(browser)

  def DidStartBrowser(self, browser):
    if self._page_test:
      self._page_test.DidStartBrowser(browser)

  def WillNavigateToPage(self, page, tab):
    """Override to ensure all resources are fetched from network."""
    tab.ClearCache(force=False)
    if self._page_test:
      self._page_test.WillNavigateToPage(page, tab)

  def DidNavigateToPage(self, page, tab):
    if self._page_test:
      self._page_test.DidNavigateToPage(page, tab)
    tab.WaitForDocumentReadyStateToBeComplete()
    # Wait (up to 30s) for the page to settle so the recording is complete.
    py_utils.WaitFor(tab.HasReachedQuiescence, 30)

  def CleanUpAfterPage(self, page, tab):
    if self._page_test:
      self._page_test.CleanUpAfterPage(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    if self._page_test:
      self._page_test.ValidateAndMeasurePage(page, tab, results)


def _GetSubclasses(base_dir, cls):
  """Returns all subclasses of |cls| found under |base_dir|.

  Args:
    base_dir: directory to search for subclasses.
    cls: a class

  Returns:
    dict of {underscored_class_name: benchmark class}
  """
  return discover.DiscoverClasses(base_dir, base_dir, cls,
                                  index_by_class_name=True)


def _MaybeGetInstanceOfClass(target, base_dir, cls):
  """Returns |target| if it already is an instance of |cls|, otherwise an
  instance of the |cls| subclass named |target|, or None if none matches."""
  if isinstance(target, cls):
    return target
  classes = _GetSubclasses(base_dir, cls)
  return classes[target]() if target in classes else None


def _PrintAllImpl(all_items, item_name, output_stream):
  """Writes a sorted name/description listing of |all_items| to the stream."""
  output_stream.write('Available %s\' names with descriptions:\n' % item_name)
  keys = sorted(all_items.keys())
  key_description = [(k, all_items[k].Description()) for k in keys]
  _PrintPairs(key_description, output_stream)
  output_stream.write('\n')


def _PrintAllBenchmarks(base_dir, output_stream):
  # TODO: reuse the logic of finding supported benchmarks in benchmark_runner.py
  # so this only prints out benchmarks that are supported by the recording
  # platform.
  _PrintAllImpl(_GetSubclasses(base_dir, benchmark.Benchmark), 'benchmarks',
                output_stream)


def _PrintAllStories(base_dir, output_stream):
  # TODO: actually print all stories once record_wpr support general
  # stories recording.
  _PrintAllImpl(_GetSubclasses(base_dir, story.StorySet), 'story sets',
                output_stream)


def _PrintPairs(pairs, output_stream, prefix=''):
  """Prints a list of string pairs with alignment."""
  first_column_length = max(len(a) for a, _ in pairs)
  format_string = '%s%%-%ds %%s\n' % (prefix, first_column_length)
  for a, b in pairs:
    output_stream.write(format_string % (a, b.strip()))


class WprRecorder(object):
  """Drives a recording run for a benchmark or story set target.

  Use as a context manager so the temporary intermediate directory is
  removed even when recording fails.
  """

  def __init__(self, base_dir, target, args=None):
    self._base_dir = base_dir
    # Throwaway directory for intermediate results; removed by _CleanUp
    # (via __exit__, or immediately if construction fails below).
    self._output_dir = tempfile.mkdtemp()
    try:
      self._options = self._CreateOptions()
      self._benchmark = _MaybeGetInstanceOfClass(target, base_dir,
                                                 benchmark.Benchmark)
      self._parser = self._options.CreateParser(usage='See %prog --help')
      self._AddCommandLineArgs()
      self._ParseArgs(args)
      self._ProcessCommandLineArgs()
      page_test = None
      if self._benchmark is not None:
        test = self._benchmark.CreatePageTest(self.options)
        # Object only needed for legacy pages; newer benchmarks don't need
        # this.
        if isinstance(test, legacy_page_test.LegacyPageTest):
          page_test = test

      self._record_page_test = RecorderPageTest(page_test)
      self._page_set_base_dir = (
          self._options.page_set_base_dir if self._options.page_set_base_dir
          else self._base_dir)
      self._story_set = self._GetStorySet(target)
    except:  # Bare except: clean up the temp dir on *any* failure, re-raise.
      self._CleanUp()
      raise

  def __enter__(self):
    return self

  def __exit__(self, *args):
    self._CleanUp()

  @property
  def options(self):
    return self._options

  def _CreateOptions(self):
    """Builds browser finder options preconfigured for WPR recording."""
    options = browser_options.BrowserFinderOptions()
    options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
    options.intermediate_dir = self._output_dir
    return options

  def _CleanUp(self):
    shutil.rmtree(self._output_dir)

  def CreateResults(self):
    """Creates a results object named after the benchmark being recorded,
    or a generic 'record_wpr' one when recording a bare story set."""
    if self._benchmark is not None:
      benchmark_name = self._benchmark.Name()
      benchmark_description = self._benchmark.Description()
    else:
      benchmark_name = 'record_wpr'
      benchmark_description = None

    return results_options.CreateResults(
        self._options,
        benchmark_name=benchmark_name,
        benchmark_description=benchmark_description,
        report_progress=True)

  def _AddCommandLineArgs(self):
    self._parser.add_option('--page-set-base-dir', action='store',
                            type='string')
    story_runner.AddCommandLineArgs(self._parser)
    if self._benchmark is not None:
      self._benchmark.AddCommandLineArgs(self._parser)
      self._benchmark.SetArgumentDefaults(self._parser)
    self._parser.add_option('--upload', action='store_true')
    self._parser.add_option('--use-local-wpr', action='store_true',
                            help='Builds and runs WPR from Catapult. '
                                 'Also enables WPR debug output to STDOUT.')
    self._SetArgumentDefaults()

  def _SetArgumentDefaults(self):
    # No metric output is wanted from a recording run.
    self._parser.set_defaults(output_formats=['none'])

  def _ParseArgs(self, args=None):
    # The optparse-style parser stores parsed values on self._options.
    args_to_parse = sys.argv[1:] if args is None else args
    self._parser.parse_args(args_to_parse)

  def _ProcessCommandLineArgs(self):
    story_runner.ProcessCommandLineArgs(self._parser, self._options)

    if self._options.use_live_sites:
      self._parser.error("Can't --use-live-sites while recording")

    if self._benchmark is not None:
      self._benchmark.ProcessCommandLineArgs(self._parser, self._options)

  def _GetStorySet(self, target):
    """Resolves |target| to a story set; exits the process if none found."""
    if self._benchmark is not None:
      return self._benchmark.CreateStorySet(self._options)
    story_set = _MaybeGetInstanceOfClass(target, self._page_set_base_dir,
                                         story.StorySet)
    if story_set is None:
      sys.stderr.write('Target %s is neither benchmark nor story set.\n'
                       % target)
      if not self._HintMostLikelyBenchmarksStories(target):
        sys.stderr.write(
            'Found no similar benchmark or story. Please use '
            '--list-benchmarks or --list-stories to list candidates.\n')
        self._parser.print_usage()
      sys.exit(1)
    return story_set

  def _HintMostLikelyBenchmarksStories(self, target):
    """Prints benchmarks/stories with names similar to |target|.

    Returns:
      True if at least one candidate hint was printed.
    """
    def _Impl(all_items, category_name):
      # Use items() rather than the Python 2-only iteritems() so this also
      # works under Python 3.
      candidates = matching.GetMostLikelyMatchedObject(
          all_items.items(), target, name_func=lambda kv: kv[1].Name())
      if candidates:
        sys.stderr.write('\nDo you mean any of those %s below?\n' %
                         category_name)
        _PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
        return True
      return False

    has_benchmark_hint = _Impl(
        _GetSubclasses(self._base_dir, benchmark.Benchmark), 'benchmarks')
    has_story_hint = _Impl(
        _GetSubclasses(self._base_dir, story.StorySet), 'stories')
    return has_benchmark_hint or has_story_hint

  def Record(self, results):
    """Runs the story set once, recording into a new temporary archive."""
    assert self._story_set.wpr_archive_info, (
        'Pageset archive_data_file path must be specified.')

    # Always record the benchmark one time only.
    self._options.pageset_repeat = 1
    self._story_set.wpr_archive_info.AddNewTemporaryRecording()
    self._record_page_test.CustomizeBrowserOptions(self._options)
    story_runner.RunStorySet(
        self._record_page_test,
        self._story_set,
        self._options,
        results)

  def HandleResults(self, results, upload_to_cloud_storage):
    """Finalizes results and commits recordings for stories that succeeded."""
    if results.had_failures or results.had_skips:
      logging.warning('Some pages failed and/or were skipped. The recording '
                      'has not been updated for these pages.')
    results.Finalize()
    self._story_set.wpr_archive_info.AddRecordedStories(
        [run.story for run in results.IterStoryRuns() if run.ok],
        upload_to_cloud_storage,
        target_platform=self._record_page_test.platform)


def Main(environment, **log_config_kwargs):
  """Entry point: parses command-line flags and performs one recording.

  Returns:
    A process exit code (0 on success, number of failed stories capped at
    255 after a recording run).
  """
  # the log level is set in browser_options
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  parser = argparse.ArgumentParser(
      usage='Record a benchmark or a story (page set).')
  parser.add_argument(
      'benchmark',
      help=('benchmark name. This argument is optional. If both benchmark name '
            'and story name are specified, this takes precedence as the '
            'target of the recording.'),
      nargs='?')
  parser.add_argument('--story', help='story (page set) name')
  parser.add_argument('--list-stories', dest='list_stories',
                      action='store_true', help='list all story names.')
  parser.add_argument('--list-benchmarks', dest='list_benchmarks',
                      action='store_true', help='list all benchmark names.')
  parser.add_argument('--upload', action='store_true',
                      help='upload to cloud storage.')

  # Unknown args are forwarded to WprRecorder's own (telemetry) parser.
  args, extra_args = parser.parse_known_args()

  if args.list_benchmarks or args.list_stories:
    if args.list_benchmarks:
      _PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
    if args.list_stories:
      _PrintAllStories(environment.top_level_dir, sys.stderr)
    return 0

  target = args.benchmark or args.story

  if not target:
    sys.stderr.write('Please specify target (benchmark or story). Please refer '
                     'usage below\n\n')
    parser.print_help()
    return 0

  binary_manager.InitDependencyManager(environment.client_configs)

  # TODO(crbug.com/1111556): update WprRecorder so that it handles the
  # difference between recording a benchmark vs recording a story better based
  # on the distinction between args.benchmark & args.story
  with WprRecorder(environment.top_level_dir,
                   target, extra_args) as wpr_recorder:
    results = wpr_recorder.CreateResults()
    wpr_recorder.Record(results)
    wpr_recorder.HandleResults(results, args.upload)
    return min(255, results.num_failed)