# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import contextlib
import io
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
import traceback

# Add src/testing/ into sys.path for importing xvfb and test_env.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import xvfb
import test_env

# Unfortunately we need to copy these variables from ../test_env.py.
# Using its get_sandbox_env breaks test runs on Linux (it seems to unset
# DISPLAY).
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'


SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))


# run_web_tests.py returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflowing or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101


# Exit code to indicate an infrastructure issue.
INFRA_FAILURE_EXIT_CODE = 87


def run_script(argv, funcs):
  def parse_json(path):
    with open(path) as f:
      return json.load(f)
  parser = argparse.ArgumentParser()
  # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
  parser.add_argument('--build-config-fs')
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build, and are the same per
  # script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  run_parser = subparsers.add_parser('compile_targets')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)

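# A minimal usage sketch for run_script (illustrative only; the handler
# function names below are hypothetical):
#
#   def main_run(args):
#     json.dump({'valid': True, 'failures': []}, args.output)
#     return 0
#
#   def main_compile_targets(args):
#     json.dump(['my_test_target'], args.output)
#     return 0
#
#   if __name__ == '__main__':
#     sys.exit(run_script(sys.argv[1:], {
#         'run': main_run,
#         'compile_targets': main_compile_targets,
#     }))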

def run_command(argv, env=None, cwd=None):
  print 'Running %r in %r (env: %r)' % (argv, cwd, env)
  rc = test_env.run_command(argv, env=env, cwd=cwd)
  print 'Command %r returned exit code %d' % (argv, rc)
  return rc


@contextlib.contextmanager
def temporary_file():
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)

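# Illustrative use of temporary_file (the command and flag are made up):
#
#   with temporary_file() as results_path:
#     rc = run_command(['./my_test_binary', '--write-results-to', results_path])
#     with open(results_path) as f:
#       results = json.load(f)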

def parse_common_test_results(json_results, test_separator='/'):
  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see blinkpy.web_tests.layout_package.json_results_generator
    result = {}
    for name, data in trie.iteritems():
      if prefix:
        name = prefix + test_separator + name
      if len(data) and 'actual' not in data and 'expected' not in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
    'passes': {},
    'unexpected_passes': {},
    'failures': {},
    'unexpected_failures': {},
    'flakes': {},
    'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and parsing the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).iteritems():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ...  Why are we assigning result
      # instead of actual_result here. Do we even need these things to be
      # hashes, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results

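# Illustrative input/output for parse_common_test_results (the test name and
# statuses are made up; see src/docs/testing/json_test_results_format.md for
# the full format):
#
#   json_results = {
#       'tests': {
#           'foo': {
#               'bar.html': {
#                   'expected': 'PASS',
#                   'actual': 'FAIL PASS',
#                   'is_unexpected': True,
#               },
#           },
#       },
#   }
#   parse_common_test_results(json_results)['unexpected_flakes']
#   # => {'foo/bar.html': 'FAIL PASS'}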

def write_interrupted_test_results_to(filepath, test_start_time):
  """Writes a test results JSON file* to filepath.

  This JSON file is formatted to explain that something went wrong.

  *src/docs/testing/json_test_results_format.md

  Args:
    filepath: A path to a file to write the output to.
    test_start_time: The start time of the test run expressed as a
      floating-point offset in seconds from the UNIX epoch.
  """
  with open(filepath, 'w') as fh:
    output = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    json.dump(output, fh)


def get_gtest_summary_passes(output):
  """Returns a mapping of test name to a boolean indicating whether it passed.

  Only partially parses the format. This code is based on code in tools/build,
  specifically
  https://chromium.googlesource.com/chromium/tools/build/+/17fef98756c5f250b20bf716829a0004857235ff/scripts/slave/recipe_modules/test_utils/util.py#189
  """
  if not output:
    return {}

  mapping = {}

  for cur_iteration_data in output.get('per_iteration_data', []):
    for test_fullname, results in cur_iteration_data.iteritems():
      # Results is a list with one entry per test try. The last one is the
      # final result.
      last_result = results[-1]

      if last_result['status'] == 'SUCCESS':
        mapping[test_fullname] = True
      elif last_result['status'] != 'SKIPPED':
        mapping[test_fullname] = False

  return mapping

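# Illustrative shape of the gtest JSON summary consumed by
# get_gtest_summary_passes (only the 'status' field matters here; the test
# name and statuses are made up):
#
#   output = {
#       'per_iteration_data': [{
#           'SuiteName.TestName': [
#               {'status': 'FAILURE'},
#               {'status': 'SUCCESS'},
#           ],
#       }],
#   }
#   get_gtest_summary_passes(output)  # => {'SuiteName.TestName': True}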

def extract_filter_list(filter_list):
  """Helper for isolated script test wrappers. Parses the
  --isolated-script-test-filter command line argument. Currently, double-colon
  ('::') is used as the separator between test names, because a single colon
  may be used in the names of perf benchmarks, which contain URLs.
  """
  return filter_list.split('::')

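# For example, extract_filter_list('Suite.A::Suite.B') returns
# ['Suite.A', 'Suite.B'] (test names are made up).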

class BaseIsolatedScriptArgsAdapter(object):
  """The base class for script adapters that translate the flags set by the
  isolated script test contract into the specific test script's flags.
  """

  def __init__(self):
    self._parser = argparse.ArgumentParser()
    self._options = None
    self._rest_args = None
    self._parser.add_argument(
        '--isolated-script-test-output', type=str,
        required=True)
    self._parser.add_argument(
        '--isolated-script-test-filter', type=str,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-repeat', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-launcher-retry-limit', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-also-run-disabled-tests',
        default=False, action='store_true', required=False)

    self._parser.add_argument('--xvfb', help='start xvfb', action='store_true')

    # This argument is ignored for now.
    self._parser.add_argument(
        '--isolated-script-test-chartjson-output', type=str)
    # This argument is ignored for now.
    self._parser.add_argument('--isolated-script-test-perf-output', type=str)

    self.add_extra_arguments(self._parser)

  def add_extra_arguments(self, parser):
    pass

  def parse_args(self, args=None):
    self._options, self._rest_args = self._parser.parse_known_args(args)

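  # Note: any flags not recognized by the parser above end up in
  # self._rest_args; generate_isolated_script_cmd() below treats them as the
  # wrapped test script plus its own arguments (it prepends sys.executable).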
  @property
  def parser(self):
    return self._parser

  @property
  def options(self):
    return self._options

  @property
  def rest_args(self):
    return self._rest_args

  def generate_test_output_args(self, output):
    del output  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_filter_args(self, test_filter_str):
    del test_filter_str  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_repeat_args(self, repeat_count):
    del repeat_count  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_launcher_retry_limit_args(self, retry_limit):
    del retry_limit  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_also_run_disabled_tests_args(self):
    raise RuntimeError('this method is not yet implemented')

  def generate_sharding_args(self, total_shard, shard_index):
    del total_shard, shard_index  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_isolated_script_cmd(self):
    isolated_script_cmd = [sys.executable] + self.rest_args

    isolated_script_cmd += self.generate_test_output_args(
        self.options.isolated_script_test_output)

    # Augment test filter args if needed.
    if self.options.isolated_script_test_filter:
      isolated_script_cmd += self.generate_test_filter_args(
          self.options.isolated_script_test_filter)

    # Augment test repeat args if needed.
    if self.options.isolated_script_test_repeat is not None:
      isolated_script_cmd += self.generate_test_repeat_args(
          self.options.isolated_script_test_repeat)

    # Augment test launcher retry limit args if needed.
    if self.options.isolated_script_test_launcher_retry_limit is not None:
      isolated_script_cmd += self.generate_test_launcher_retry_limit_args(
          self.options.isolated_script_test_launcher_retry_limit)

    # Augment also-run-disabled-tests args if needed.
    if self.options.isolated_script_test_also_run_disabled_tests:
      isolated_script_cmd += self.generate_test_also_run_disabled_tests_args()

    # Augment shard args if needed (GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX
    # are the standard gtest sharding environment variables).
    env = os.environ.copy()

    total_shards = None
    shard_index = None

    if 'GTEST_TOTAL_SHARDS' in env:
      total_shards = int(env['GTEST_TOTAL_SHARDS'])
    if 'GTEST_SHARD_INDEX' in env:
      shard_index = int(env['GTEST_SHARD_INDEX'])
    if total_shards is not None and shard_index is not None:
      isolated_script_cmd += self.generate_sharding_args(
          total_shards, shard_index)

    return isolated_script_cmd


  def clean_up_after_test_run(self):
    pass

  def run_test(self):
    self.parse_args()
    cmd = self.generate_isolated_script_cmd()

    env = os.environ.copy()

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    valid = True
    try:
      env['CHROME_HEADLESS'] = '1'
      print 'Running command: %s\nwith env: %r' % (
          ' '.join(cmd), env)
      if self.options.xvfb:
        exit_code = xvfb.run_executable(cmd, env)
      else:
        exit_code = test_env.run_command(cmd, env=env)
      print 'Command returned exit code %d' % exit_code
      return exit_code
    except Exception:
      traceback.print_exc()
      valid = False
    finally:
      self.clean_up_after_test_run()

    if not valid:
      failures = ['(entire test suite)']
      with open(self.options.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return 1

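
# A minimal subclass sketch for BaseIsolatedScriptArgsAdapter (the class name
# and the wrapped script's flag names below are hypothetical; real adapters
# override the generate_* hooks with their script's actual flags):
#
#   class MyScriptAdapter(BaseIsolatedScriptArgsAdapter):
#     def generate_test_output_args(self, output):
#       return ['--write-full-results-to', output]
#
#     def generate_test_filter_args(self, test_filter_str):
#       return ['--test-filter=' + test_filter_str]
#
#     def generate_test_repeat_args(self, repeat_count):
#       return ['--repeat=%d' % repeat_count]
#
#     # (override the remaining generate_* hooks similarly)
#
#   if __name__ == '__main__':
#     sys.exit(MyScriptAdapter().run_test())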