#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function

import argparse
import collections
import json
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import uuid

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
           '  %(module)s.%(funcName)s:%(lineno)d  %(message)s')

from core import path_util
from core import upload_results_to_perf_dashboard
from core import results_merger

path_util.AddAndroidPylibToPath()

try:
  from pylib.utils import logdog_helper
except ImportError:
  pass


RESULTS_URL = 'https://chromeperf.appspot.com'

# Until we are migrated to LUCI, we will be using a hard-coded master name
# based on what is passed in via the build properties.
# See crbug.com/801289 for more details.
MACHINE_GROUP_JSON_FILE = os.path.join(
    path_util.GetChromiumSrcDir(), 'tools', 'perf', 'core',
    'perf_dashboard_machine_group_mapping.json')
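
# A minimal sketch of that mapping file's shape (keys and values here are
# illustrative, not authoritative): it maps a legacy master name from the
# build properties to a perf dashboard machine group.
#   {
#     "chromium.perf": "ChromiumPerf"
#   }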

JSON_CONTENT_TYPE = 'application/json'

# Cache of what data format (ChartJSON, Histograms, etc.) each results file is
# in so that only one disk read is required when checking the format multiple
# times.
_data_format_cache = {}
DATA_FORMAT_GTEST = 'gtest'
DATA_FORMAT_CHARTJSON = 'chartjson'
DATA_FORMAT_HISTOGRAMS = 'histograms'
DATA_FORMAT_UNKNOWN = 'unknown'


def _GetMachineGroup(build_properties):
  machine_group = None
  if build_properties.get('perf_dashboard_machine_group', False):
    # Once the LUCI migration is complete this will exist as a property
    # in the build properties.
    machine_group = build_properties['perf_dashboard_machine_group']
  else:
    mastername_mapping = {}
    with open(MACHINE_GROUP_JSON_FILE) as fp:
      mastername_mapping = json.load(fp)
      legacy_mastername = build_properties['mastername']
      if mastername_mapping.get(legacy_mastername):
        machine_group = mastername_mapping[legacy_mastername]
  if not machine_group:
    raise ValueError(
        'Must set perf_dashboard_machine_group or have a valid '
        'mapping in '
        'src/tools/perf/core/perf_dashboard_machine_group_mapping.json. '
        'See bit.ly/perf-dashboard-machine-group for more details.')
  return machine_group


def _upload_perf_results(json_to_upload, name, configuration_name,
    build_properties, output_json_file):
  """Upload the contents of result JSON(s) to the perf dashboard."""
  args = [
      '--buildername', build_properties['buildername'],
      '--buildnumber', build_properties['buildnumber'],
      '--name', name,
      '--configuration-name', configuration_name,
      '--results-file', json_to_upload,
      '--results-url', RESULTS_URL,
      '--got-revision-cp', build_properties['got_revision_cp'],
      '--got-v8-revision', build_properties['got_v8_revision'],
      '--got-webrtc-revision', build_properties['got_webrtc_revision'],
      '--output-json-file', output_json_file,
      '--perf-dashboard-machine-group', _GetMachineGroup(build_properties)
  ]
  buildbucket = build_properties.get('buildbucket', {})
  if isinstance(buildbucket, basestring):
    buildbucket = json.loads(buildbucket)

  if 'build' in buildbucket:
    args += [
        '--project', buildbucket['build'].get('project'),
        '--buildbucket', buildbucket['build'].get('bucket'),
    ]

  if build_properties.get('git_revision'):
    args.append('--git-revision')
    args.append(build_properties['git_revision'])
  if _is_histogram(json_to_upload):
    args.append('--send-as-histograms')

  # TODO(crbug.com/1072729): Log this at the top level.
  logging.info('upload_results_to_perf_dashboard: %s.' % args)

  return upload_results_to_perf_dashboard.main(args)

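# The helpers below classify a results file by the shape of its JSON (see
# _determine_data_format): a top-level list is treated as histogram data, a
# dict with a 'charts' key as ChartJSON, and any other dict as gtest-style
# results.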

def _is_histogram(json_file):
  return _determine_data_format(json_file) == DATA_FORMAT_HISTOGRAMS


def _is_gtest(json_file):
  return _determine_data_format(json_file) == DATA_FORMAT_GTEST


def _determine_data_format(json_file):
  if json_file not in _data_format_cache:
    with open(json_file) as f:
      data = json.load(f)
    if isinstance(data, list):
      _data_format_cache[json_file] = DATA_FORMAT_HISTOGRAMS
    elif isinstance(data, dict):
      if 'charts' in data:
        _data_format_cache[json_file] = DATA_FORMAT_CHARTJSON
      else:
        _data_format_cache[json_file] = DATA_FORMAT_GTEST
    else:
      _data_format_cache[json_file] = DATA_FORMAT_UNKNOWN
  return _data_format_cache[json_file]


def _merge_json_output(output_json, jsons_to_merge, extra_links):
  """Merges the contents of one or more results JSONs.

  Args:
    output_json: A path to a JSON file to which the merged results should be
      written.
    jsons_to_merge: A list of JSON files that should be merged.
    extra_links: a dict mapping human-readable strings that describe the data
      to the logdog URLs that contain the data.
  """
  begin_time = time.time()
  merged_results = results_merger.merge_test_results(jsons_to_merge)

  # Only append the perf results links if present.
  if extra_links:
    merged_results['links'] = extra_links

  with open(output_json, 'w') as f:
    json.dump(merged_results, f)

  end_time = time.time()
  print_duration('Merging json test results', begin_time, end_time)
  return 0


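# _handle_perf_json_test_results() below relies on only two fields of the
# JSON Test Results (version 3) format. A minimal sketch (values are
# illustrative):
#   {"version": 3, "tests": {...}, ...}
# An empty or missing 'tests' dict is treated as the benchmark having run no
# tests on that shard.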
def _handle_perf_json_test_results(
    benchmark_directory_map, test_results_list):
  """Checks the test_results.json under each benchmark directory:

  1. Marks the benchmark 'enabled' if test results are found.
  2. Adds the json content to |test_results_list| for non-reference builds.
  """
  begin_time = time.time()
  benchmark_enabled_map = {}
  for benchmark_name, directories in benchmark_directory_map.iteritems():
    for directory in directories:
      # Determine whether this is a reference build run.
      is_ref = '.reference' in benchmark_name
      enabled = True
      try:
        with open(os.path.join(directory, 'test_results.json')) as json_data:
          json_results = json.load(json_data)
          if not json_results:
            # Output is null, meaning the test didn't produce any results.
            # Output an error and continue loading the rest of the test
            # results.
            logging.warning(
                'No results produced for %s, skipping upload' % directory)
            continue
          if json_results.get('version') == 3:
            # Non-telemetry tests don't have written json results, but
            # if they are executing then they are enabled and will generate
            # chartjson results.
            if not bool(json_results.get('tests')):
              enabled = False
          if not is_ref:
            # We don't need to upload reference build data to the
            # flakiness dashboard since we don't monitor the ref build.
            test_results_list.append(json_results)
      except IOError as e:
        # TODO(crbug.com/936602): Figure out how to surface these errors.
        # Should we have a non-zero exit code if we error out?
        logging.error('Failed to obtain test results for %s: %s',
                      benchmark_name, e)
        continue
      if not enabled:
        # We don't upload disabled benchmarks or tests that are run
        # as a smoke test.
        logging.info(
            'Benchmark %s ran no tests on at least one shard' % benchmark_name)
        continue
      benchmark_enabled_map[benchmark_name] = True

  end_time = time.time()
  print_duration('Analyzing perf json test results', begin_time, end_time)
  return benchmark_enabled_map


def _generate_unique_logdog_filename(name_prefix):
  return name_prefix + '_' + str(uuid.uuid4())


def _handle_perf_logs(benchmark_directory_map, extra_links):
  """Upload benchmark logs to logdog and add a page entry for them."""
  begin_time = time.time()
  benchmark_logs_links = collections.defaultdict(list)

  for benchmark_name, directories in benchmark_directory_map.iteritems():
    for directory in directories:
      benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
      if os.path.exists(benchmark_log_file):
        with open(benchmark_log_file) as f:
          uploaded_link = logdog_helper.text(
              name=_generate_unique_logdog_filename(benchmark_name),
              data=f.read())
          benchmark_logs_links[benchmark_name].append(uploaded_link)

  logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
  logdog_stream = logdog_helper.text(
      logdog_file_name, json.dumps(benchmark_logs_links, sort_keys=True,
                                   indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  extra_links['Benchmarks logs'] = logdog_stream
  end_time = time.time()
  print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
  begin_time = time.time()
  with open(benchmarks_shard_map_file) as f:
    benchmarks_shard_data = json.load(f)
    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
    logdog_stream = logdog_helper.text(
        logdog_file_name, json.dumps(benchmarks_shard_data, sort_keys=True,
                                     indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks shard map'] = logdog_stream
  end_time = time.time()
  print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
  return os.path.basename(directory).replace(' benchmark', '')


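# For reference, the task output layout that _scan_output_dir() below expects,
# as inferred from the code in this file (directory names are illustrative):
#
#   task_output_dir/
#     <shard dir>/
#       benchmarks_shard_map.json
#       <benchmark_name> benchmark/
#         benchmark_log.txt
#         perf_results.json
#         test_results.json
#       <benchmark_name>.reference benchmark/
#         ...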
def _scan_output_dir(task_output_dir):
  benchmark_directory_map = {}
  benchmarks_shard_map_file = None

  directory_list = [
      f for f in os.listdir(task_output_dir)
      if not os.path.isfile(os.path.join(task_output_dir, f))
  ]
  benchmark_directory_list = []
  for directory in directory_list:
    for f in os.listdir(os.path.join(task_output_dir, directory)):
      path = os.path.join(task_output_dir, directory, f)
      if os.path.isdir(path):
        benchmark_directory_list.append(path)
      elif path.endswith('benchmarks_shard_map.json'):
        benchmarks_shard_map_file = path
  # Now create a map of benchmark name to the list of directories
  # the results were written to.
  for directory in benchmark_directory_list:
    benchmark_name = _get_benchmark_name(directory)
    if benchmark_name in benchmark_directory_map:
      benchmark_directory_map[benchmark_name].append(directory)
    else:
      benchmark_directory_map[benchmark_name] = [directory]

  return benchmark_directory_map, benchmarks_shard_map_file


def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
  """Process perf results.

  Consists of merging the json-test-format output, uploading the perf test
  output (chartjson and histogram), and storing the benchmark logs in logdog.

  Each directory in task_output_dir corresponds to one shard of the task.
  Within it there is one subdirectory per benchmark that was run, containing
  a perf_results.json file with the performance results in histogram or
  dashboard json format and a test_results.json file with the json test
  results for the benchmark.

  Returns:
    (return_code, benchmark_upload_result_map):
      return_code is 0 if the whole operation is successful, non-zero
        otherwise.
      benchmark_upload_result_map: the dictionary that describes which
        benchmarks were successfully uploaded.
  """
  handle_perf = not lightweight or not skip_perf
  handle_non_perf = not lightweight or skip_perf
  logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
               (lightweight, handle_perf, handle_non_perf))
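
  # Truth table for the two flags above, derived from the expressions:
  #   lightweight=False                  -> handle perf and non-perf.
  #   lightweight=True, skip_perf=False  -> handle perf only.
  #   lightweight=True, skip_perf=True   -> handle non-perf only.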

  begin_time = time.time()
  return_code = 0
  benchmark_upload_result_map = {}

  benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(
      task_output_dir)

  test_results_list = []
  extra_links = {}

  if handle_non_perf:
    # First, upload benchmarks shard map to logdog and add a page
    # entry for it in extra_links.
    if benchmarks_shard_map_file:
      _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)

    # Second, upload all the benchmark logs to logdog and add a page entry for
    # those links in extra_links.
    _handle_perf_logs(benchmark_directory_map, extra_links)

  # Then try to obtain the list of json test results to merge
  # and determine the status of each benchmark.
  benchmark_enabled_map = _handle_perf_json_test_results(
      benchmark_directory_map, test_results_list)

  if not smoke_test_mode and handle_perf:
    try:
      build_properties = json.loads(build_properties)
      if not configuration_name:
        # We are deprecating perf-id; see crbug.com/817823.
        configuration_name = build_properties['buildername']

      return_code, benchmark_upload_result_map = _handle_perf_results(
          benchmark_enabled_map, benchmark_directory_map,
          configuration_name, build_properties, extra_links,
          output_results_dir)
    except Exception:
      logging.exception('Error handling perf results jsons')
      return_code = 1

  if handle_non_perf:
    # Finally, merge all test results jsons, add the extra links and write the
    # result out to the output location.
    _merge_json_output(output_json, test_results_list, extra_links)

  end_time = time.time()
  print_duration('Total process_perf_results', begin_time, end_time)
  return return_code, benchmark_upload_result_map

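
# _merge_chartjson_results() below folds the 'charts' dict of each later
# shard into the first shard's result. A sketch with made-up chart names:
#   shard 0: {'charts': {'chart_a': {...}}, <other top-level keys>}
#   shard 1: {'charts': {'chart_b': {...}}}
#   merged:  {'charts': {'chart_a': {...}, 'chart_b': {...}},
#             <other top-level keys from shard 0>}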
def _merge_chartjson_results(chartjson_dicts):
  merged_results = chartjson_dicts[0]
  for chartjson_dict in chartjson_dicts[1:]:
    for key in chartjson_dict:
      if key == 'charts':
        for add_key in chartjson_dict[key]:
          merged_results[key][add_key] = chartjson_dict[key][add_key]
  return merged_results


def _merge_histogram_results(histogram_lists):
  merged_results = []
  for histogram_list in histogram_lists:
    merged_results += histogram_list

  return merged_results


def _merge_perf_results(benchmark_name, results_filename, directories):
  begin_time = time.time()
  collected_results = []
  for directory in directories:
    filename = os.path.join(directory, 'perf_results.json')
    try:
      with open(filename) as pf:
        collected_results.append(json.load(pf))
    except IOError as e:
      # TODO(crbug.com/936602): Figure out how to surface these errors.
      # Should we have a non-zero exit code if we error out?
      logging.error('Failed to obtain perf results from %s: %s',
                    directory, e)
  if not collected_results:
    logging.error('Failed to obtain any perf results from %s.',
                  benchmark_name)
    return

  # We assume that when there are multiple shards, their results are all
  # either ChartJSON or histogram sets; non-telemetry benchmarks only ever
  # run on one shard.
  merged_results = []
  if isinstance(collected_results[0], dict):
    merged_results = _merge_chartjson_results(collected_results)
  elif isinstance(collected_results[0], list):
    merged_results = _merge_histogram_results(collected_results)

  with open(results_filename, 'w') as rf:
    json.dump(merged_results, rf)

  end_time = time.time()
  print_duration(('%s results merging' % (benchmark_name)),
                 begin_time, end_time)


def _upload_individual(
    benchmark_name, directories, configuration_name, build_properties,
    output_json_file):
  tmpfile_dir = tempfile.mkdtemp()
  try:
    upload_begin_time = time.time()
    # There are potentially multiple directories with results; re-write and
    # merge them if necessary.
    results_filename = None
    if len(directories) > 1:
      merge_perf_dir = os.path.join(
          os.path.abspath(tmpfile_dir), benchmark_name)
      if not os.path.exists(merge_perf_dir):
        os.makedirs(merge_perf_dir)
      results_filename = os.path.join(
          merge_perf_dir, 'merged_perf_results.json')
      _merge_perf_results(benchmark_name, results_filename, directories)
    else:
      # The results were only written to one shard, so use that shard's data.
      results_filename = os.path.join(directories[0], 'perf_results.json')

    results_size_in_mib = os.path.getsize(results_filename) / (2 ** 20)
    logging.info('Uploading perf results from %s benchmark (size %s MiB)' %
                 (benchmark_name, results_size_in_mib))
    with open(output_json_file, 'w') as oj:
      upload_return_code = _upload_perf_results(
          results_filename,
          benchmark_name, configuration_name, build_properties, oj)
      upload_end_time = time.time()
      print_duration(('%s upload time' % (benchmark_name)),
                     upload_begin_time, upload_end_time)
      return (benchmark_name, upload_return_code == 0)
  finally:
    shutil.rmtree(tmpfile_dir)


def _upload_individual_benchmark(params):
  try:
    return _upload_individual(*params)
  except Exception:
    benchmark_name = params[0]
    upload_succeed = False
    logging.exception('Error uploading perf result of %s' % benchmark_name)
    return benchmark_name, upload_succeed


def _GetCpuCount(log=True):
  try:
    return multiprocessing.cpu_count()
  except NotImplementedError:
    if log:
      logging.warning(
          'Failed to get a CPU count for this bot. See crbug.com/947035.')
    # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
    # only have 4 cores. Once we move to all-linux, this can be increased or
    # we can even delete this whole function and use multiprocessing.cpu_count()
    # directly.
    return 4


def _handle_perf_results(
    benchmark_enabled_map, benchmark_directory_map, configuration_name,
    build_properties, extra_links, output_results_dir):
  """Upload perf results to the perf dashboard.

  This method also uploads the perf results to logdog and adds the links
  to |extra_links|.

  Returns:
    (return_code, benchmark_upload_result_map)
    return_code is 0 if the upload to the perf dashboard succeeded, 1
      otherwise.
    benchmark_upload_result_map is a dictionary describing which benchmarks
      were successfully uploaded.
  """
  begin_time = time.time()
  # Upload all eligible benchmarks to the perf dashboard.
  results_dict = {}

  invocations = []
  for benchmark_name, directories in benchmark_directory_map.iteritems():
    if not benchmark_enabled_map.get(benchmark_name, False):
      continue
    # Create a place to write the perf results that will be written out to
    # logdog.
    output_json_file = os.path.join(
        output_results_dir, (str(uuid.uuid4()) + benchmark_name))
    results_dict[benchmark_name] = output_json_file
    # TODO(crbug.com/1072729): Pass final arguments instead of build
    # properties and configuration_name.
    invocations.append((
        benchmark_name, directories, configuration_name,
        build_properties, output_json_file))

  # Kick off the uploads in multiple processes.
  # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
  # to 2 processes to avoid this error. Uncomment the following code once
  # the problem is fixed on the dashboard side.
  # pool = multiprocessing.Pool(_GetCpuCount())
  pool = multiprocessing.Pool(2)
  upload_result_timeout = False
  try:
    async_result = pool.map_async(
        _upload_individual_benchmark, invocations)
    # TODO(crbug.com/947035): What timeout is reasonable?
    results = async_result.get(timeout=4000)
  except multiprocessing.TimeoutError:
    upload_result_timeout = True
    logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
    results = []
    for benchmark_name in benchmark_directory_map:
      results.append((benchmark_name, False))
  finally:
    pool.terminate()

  # Keep a mapping of benchmarks to their upload results.
  benchmark_upload_result_map = {}
  for r in results:
    benchmark_upload_result_map[r[0]] = r[1]

  logdog_dict = {}
  upload_failures_counter = 0
  logdog_stream = None
  logdog_label = 'Results Dashboard'
  for benchmark_name, output_file in results_dict.iteritems():
    upload_succeed = benchmark_upload_result_map[benchmark_name]
    if not upload_succeed:
      upload_failures_counter += 1
    is_reference = '.reference' in benchmark_name
    _write_perf_data_to_logfile(
        benchmark_name, output_file,
        configuration_name, build_properties, logdog_dict,
        is_reference, upload_failure=not upload_succeed)

  logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
  logdog_stream = logdog_helper.text(
      logdog_file_name,
      json.dumps(dict(logdog_dict), sort_keys=True,
                 indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  if upload_failures_counter > 0:
    logdog_label += (' %s merge script perf data upload failures' %
                     upload_failures_counter)
  extra_links[logdog_label] = logdog_stream
  end_time = time.time()
  print_duration('Uploading results to perf dashboard', begin_time, end_time)
  if upload_result_timeout or upload_failures_counter > 0:
    return 1, benchmark_upload_result_map
  return 0, benchmark_upload_result_map


def _write_perf_data_to_logfile(benchmark_name, output_file,
    configuration_name, build_properties,
    logdog_dict, is_ref, upload_failure):
  viewer_url = None
  # Logdog file to write perf results to.
  if os.path.exists(output_file):
    results = None
    with open(output_file) as f:
      try:
        results = json.load(f)
      except ValueError:
        logging.error('Error parsing perf results JSON for benchmark %s' %
                      benchmark_name)
    if results:
      output_json_file = logdog_helper.open_text(benchmark_name)
      try:
        json.dump(results, output_json_file,
                  indent=4, separators=(',', ': '))
      except ValueError as e:
        logging.error('ValueError: "%s" while dumping output to logdog' % e)
      finally:
        output_json_file.close()
      viewer_url = output_json_file.get_viewer_url()
  else:
    logging.warning("Perf results JSON file doesn't exist for benchmark %s" %
                    benchmark_name)

  base_benchmark_name = benchmark_name.replace('.reference', '')

  if base_benchmark_name not in logdog_dict:
    logdog_dict[base_benchmark_name] = {}

  # Add links for the perf results and the dashboard url to
  # the logs section of buildbot.
  if is_ref:
    if viewer_url:
      logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
    if upload_failure:
      logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
  else:
    logdog_dict[base_benchmark_name]['dashboard_url'] = (
        upload_results_to_perf_dashboard.GetDashboardUrl(
            benchmark_name,
            configuration_name, RESULTS_URL,
            build_properties['got_revision_cp'],
            _GetMachineGroup(build_properties)))
    if viewer_url:
      logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
    if upload_failure:
      logdog_dict[base_benchmark_name]['upload_failed'] = 'True'


def print_duration(step, start, end):
  logging.info('Duration of %s: %d seconds' % (step, end - start))


def main():
  """See collect_task.collect_task for more on the merge script API."""
  logging.info(sys.argv)
  parser = argparse.ArgumentParser()
  # configuration-name (previously perf-id) is the name of the bot the tests
  # run on. For example, buildbot-test is the name of the android-go-perf bot.
  # configuration-name and results-url are set in
  # tools/perf/core/chromium.perf.fyi.extras.json, a json file which is going
  # away.
  parser.add_argument('--configuration-name', help=argparse.SUPPRESS)

  parser.add_argument('--build-properties', help=argparse.SUPPRESS)
  parser.add_argument('--summary-json', help=argparse.SUPPRESS)
  parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
  parser.add_argument('-o', '--output-json', required=True,
                      help=argparse.SUPPRESS)
  parser.add_argument(
      '--skip-perf',
      action='store_true',
      help='In lightweight mode, this indicates the workflow is from the'
      ' processor; otherwise its value is ignored.')
  parser.add_argument(
      '--lightweight',
      action='store_true',
      help='Choose the lightweight mode in which the perf result handling'
      ' is performed on a separate VM.')
  parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
  parser.add_argument('--smoke-test-mode', action='store_true',
                      help='This test should be run in smoke test mode,'
                      ' meaning it does not upload to the perf dashboard.')
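
  # With the flags above, an invocation of this merge script looks roughly
  # like the following (all values are illustrative, not from a real build):
  #   vpython <this script> \
  #       --configuration-name android-go-perf \
  #       --build-properties '{"buildername": "...", ...}' \
  #       --task-output-dir /path/to/task_output_dir \
  #       -o /path/to/output.json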

  args = parser.parse_args()

  output_results_dir = tempfile.mkdtemp('outputresults')
  try:
    return_code, _ = process_perf_results(
        args.output_json, args.configuration_name, args.build_properties,
        args.task_output_dir, args.smoke_test_mode, output_results_dir,
        args.lightweight, args.skip_perf)
    return return_code
  finally:
    shutil.rmtree(output_results_dir)


if __name__ == '__main__':
  sys.exit(main())