# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import os
import re

import constants
from perf_tests_helper import PrintPerfResult
from pylib import pexpect
from test_result import BaseTestResult, TestResults


# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as an NDK app from an APK rather than a stand-alone executable.
class TestPackage(object):
  """A helper base class for both APK and stand-alone executables.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: The Valgrind tool wrapper in use (queried for its timeout scale).
    dump_debug_info: A debug_info object.
  """

  def __init__(self, adb, device, test_suite, timeout, rebaseline,
               performance_test, cleanup_test_files, tool, dump_debug_info):
    self.adb = adb
    self.device = device
    self.test_suite_full = test_suite
    self.test_suite = os.path.splitext(test_suite)[0]
    self.test_suite_basename = self._GetTestSuiteBaseName()
    self.test_suite_dirname = os.path.dirname(
        self.test_suite.split(self.test_suite_basename)[0])
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2
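    # The timeout is further multiplied by the tool's scale factor below;
    # e.g. timeout=0 on a buildbot with a scale of 1 works out to
    # 60 * 2 * 1 = 120 seconds per test.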
    self.timeout = timeout * self.tool.GetTimeoutScale()
    self.dump_debug_info = dump_debug_info

  def _BeginGetIOStats(self):
56    """Gets I/O statistics before running test.
57
58    Return:
59      I/O stats object.The I/O stats object may be None if the test is not
60      performance test.
61    """
    initial_io_stats = None
    # Try to get the disk I/O statistics for all performance tests.
    if self.performance_test and not self.rebaseline:
      initial_io_stats = self.adb.GetIoStats()
    return initial_io_stats

  def _EndGetIOStats(self, initial_io_stats):
69    """Gets I/O statistics after running test and calcuate the I/O delta.
70
71    Args:
72      initial_io_stats: I/O stats object got from _BeginGetIOStats.
73
74    Return:
75      String for formated diso I/O statistics.
76    """
    disk_io = ''
    if self.performance_test and initial_io_stats:
      final_io_stats = self.adb.GetIoStats()
      for stat in final_io_stats:
        disk_io += '\n' + PrintPerfResult(stat, stat,
                                          [final_io_stats[stat] -
                                           initial_io_stats[stat]],
                                          stat.split('_')[1],
                                          print_to_stdout=False)
      logging.info(disk_io)
    return disk_io

  def GetDisabledPrefixes(self):
    return ['DISABLED_', 'FLAKY_', 'FAILS_']

  def _ParseGTestListTests(self, all_tests):
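    """Parses the raw output of --gtest_list_tests into full test names.

    The listing is expected to look roughly like:
      FooTest.
        Bar
        DISABLED_Baz
    which yields ['FooTest.Bar']; tests with disabled/flaky/fails prefixes
    are skipped.
    """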
    ret = []
    current = ''
    disabled_prefixes = self.GetDisabledPrefixes()
    for test in all_tests:
      if not test:
        continue
      if test[0] != ' ' and not test.endswith('.'):
        # Ignore any lines with unexpected format.
        continue
      if test[0] != ' ' and test.endswith('.'):
        current = test
        continue
      if 'YOU HAVE' in test:
        break
      test_name = test[2:]
      if not any(test_name.startswith(x) for x in disabled_prefixes):
        ret += [current + test_name]
    return ret

  def PushDataAndPakFiles(self):
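    """Pushes the .pak resources and test data that specific suites need.

    Files are copied to the device's external storage only for the suites
    that use them (ui_unittests, unit_tests, content_unittests).
    """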
    external_storage = self.adb.GetExternalStorage()
    if (self.test_suite_basename == 'ui_unittests' or
        self.test_suite_basename == 'unit_tests'):
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome.pak',
          external_storage + '/paks/chrome.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/locales/en-US.pak',
          external_storage + '/paks/en-US.pak')
    if self.test_suite_basename == 'unit_tests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/resources.pak',
          external_storage + '/paks/resources.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome_100_percent.pak',
          external_storage + '/paks/chrome_100_percent.pak')
      self.adb.PushIfNeeded(self.test_suite_dirname + '/test_data',
                            external_storage + '/test_data')
    if self.test_suite_basename == 'content_unittests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/content_resources.pak',
          external_storage + '/paks/content_resources.pak')

  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: the process generating output as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    # APK tests rely on the PASSED tag.
    re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')
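    # For reference, typical gtest output lines matched by the patterns
    # above look like:
    #   [ RUN      ] FooTest.Bar
    #   [       OK ] FooTest.Bar (12 ms)
    #   [  FAILED  ] FooTest.Bar (12 ms)
    #   [  PASSED  ] 10 tests.
    # (CRASHED and RUNNER_FAILED are not standard gtest markers; they come
    # from the runner's own instrumentation, e.g. the crash signal handlers
    # mentioned above.)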
    io_stats_before = self._BeginGetIOStats()
    try:
      while True:
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self.timeout)
        if found == 1:  # Matched PASSED.
          break
        if found == 2:  # Matched RUNNER_FAILED.
          logging.error('RUNNER_FAILED')
          overall_fail = True
          break
        if self.dump_debug_info:
          self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
        full_test_name = p.match.group(1).replace('\r', '')
        found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
        if found == 0:  # Matched OK.
          if full_test_name == p.match.group(1).replace('\r', ''):
            ok_tests += [BaseTestResult(full_test_name, p.before)]
            continue
        if found == 2:  # Matched CRASHED.
          crashed_tests += [BaseTestResult(full_test_name, p.before)]
          overall_fail = True
          break
        # The test failed.
        failed_tests += [BaseTestResult(full_test_name, p.before)]
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self.timeout)
      timed_out = True
    finally:
      p.close()
    if not self.rebaseline:
      # Disk I/O statistics are formatted and logged by _EndGetIOStats itself.
      self._EndGetIOStats(io_stats_before)
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                        'pexpect.before: %s'
                                        '\npexpect.after: %s'
                                        % (p.before,
                                           p.after))]
    # Create TestResults and return.
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)
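

# A rough usage sketch, assuming a concrete subclass and an adb shell command
# spawned via pexpect (names and paths here are illustrative, not the real
# runner wiring):
#
#   package = SomeTestPackageSubclass(adb, device, 'base_unittests', 0, False,
#                                     False, False, tool, None)
#   p = pexpect.spawn(
#       'adb -s %s shell /data/local/tmp/base_unittests' % device)
#   results = package._WatchTestOutput(p)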