1# Copyright (c) 2012 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Runs the Java tests. See more information on run_instrumentation_tests.py."""
6
7import fnmatch
8import logging
9import os
10import re
11import shutil
12import sys
13import time
14
15import android_commands
16import apk_info
17from base_test_runner import BaseTestRunner
18from base_test_sharder import BaseTestSharder, SetTestsContainer
19import cmd_helper
20import constants
21import errors
22from forwarder import Forwarder
23from json_perf_parser import GetAverageRunInfoFromJSONString
24from perf_tests_helper import PrintPerfResult
25import sharded_tests_queue
26from test_result import SingleTestResult, TestResults
27import valgrind_tools
28
29_PERF_TEST_ANNOTATION = 'PerfTest'
30
31
32class FatalTestException(Exception):
33  """A fatal test exception."""
34  pass
35
36
37def _TestNameToExpectation(test_name):
38  # A test name is a Package.Path.Class#testName; convert to what we use in
39  # the expectation file.
40  return '.'.join(test_name.replace('#', '.').split('.')[-2:])
41
42
43def FilterTests(test_names, pattern_list, inclusive):
44  """Filters |test_names| using a list of patterns.
45
46  Args:
47    test_names: A list of test names.
48    pattern_list: A list of patterns.
49    inclusive: If True, returns the tests that match any pattern. if False,
50               returns the tests that do not match any pattern.
51  Returns:
52    A list of test names.
53  """
54  ret = []
55  for t in test_names:
56    has_match = False
57    for pattern in pattern_list:
58      has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
59                                               pattern)
60    if has_match == inclusive:
61      ret += [t]
62  return ret
63
64
65class TestRunner(BaseTestRunner):
66  """Responsible for running a series of tests connected to a single device."""
67
68  _DEVICE_DATA_DIR = 'chrome/test/data'
69  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
70                           'external/emma/lib/emma.jar')
71  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
72  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
73  _COVERAGE_FILENAME = 'coverage.ec'
74  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
75                           _COVERAGE_FILENAME)
76  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
77                                                         ''),
78                                          'out/target/common/obj/APPS',
79                                          'Chrome_intermediates/coverage.em')
80  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
81  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
82                                       '/chrome-profile*')
83  _DEVICE_HAS_TEST_FILES = {}
84
85  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
86               ports_to_forward):
87    """Create a new TestRunner.
88
89    Args:
90      options: An options object with the following required attributes:
91      -  build_type: 'Release' or 'Debug'.
92      -  install_apk: Re-installs the apk if opted.
93      -  save_perf_json: Whether or not to save the JSON file from UI perf
94            tests.
95      -  screenshot_failures: Take a screenshot for a test failure
96      -  tool: Name of the Valgrind tool.
97      -  wait_for_debugger: blocks until the debugger is connected.
98      device: Attached android device.
99      tests_iter: A list of tests to be run.
100      coverage: Collects coverage information if opted.
101      shard_index: shard # for this TestRunner, used to create unique port
102          numbers.
103      apks: A list of ApkInfo objects need to be installed. The first element
104            should be the tests apk, the rests could be the apks used in test.
105            The default is ChromeTest.apk.
106      ports_to_forward: A list of port numbers for which to set up forwarders.
107                        Can be optionally requested by a test case.
108    Raises:
109      FatalTestException: if coverage metadata is not available.
110    """
111    BaseTestRunner.__init__(
112        self, device, options.tool, shard_index, options.build_type)
113
114    if not apks:
115      apks = [apk_info.ApkInfo(options.test_apk_path,
116                               options.test_apk_jar_path)]
117
118    self.build_type = options.build_type
119    self.install_apk = options.install_apk
120    self.save_perf_json = options.save_perf_json
121    self.screenshot_failures = options.screenshot_failures
122    self.wait_for_debugger = options.wait_for_debugger
123
124    self.tests_iter = tests_iter
125    self.coverage = coverage
126    self.apks = apks
127    self.test_apk = apks[0]
128    self.instrumentation_class_path = self.test_apk.GetPackageName()
129    self.ports_to_forward = ports_to_forward
130
131    self.test_results = TestResults()
132    self.forwarder = None
133
134    if self.coverage:
135      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
136        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
137      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
138        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
139                                 ' : Coverage meta info [' +
140                                 TestRunner._COVERAGE_META_INFO_PATH +
141                                 '] does not exist.')
142      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
143          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
144        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
145                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
146                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
147                                 '] does not exist.')
148
149  def _GetTestsIter(self):
150    if not self.tests_iter:
151      # multiprocessing.Queue can't be pickled across processes if we have it as
152      # a member set during constructor.  Grab one here instead.
153      self.tests_iter = (BaseTestSharder.tests_container)
154    assert self.tests_iter
155    return self.tests_iter
156
157  def CopyTestFilesOnce(self):
158    """Pushes the test data files to the device. Installs the apk if opted."""
159    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
160      logging.warning('Already copied test files to device %s, skipping.',
161                      self.device)
162      return
163    host_test_files = [
164        ('android_webview/test/data/device_files', 'webview'),
165        ('content/test/data/android/device_files', 'content'),
166        ('chrome/test/data/android/device_files', 'chrome')
167    ]
168    for (host_src, dst_layer) in host_test_files:
169      host_test_files_path = constants.CHROME_DIR + '/' + host_src
170      if os.path.exists(host_test_files_path):
171        self.adb.PushIfNeeded(host_test_files_path,
172                              self.adb.GetExternalStorage() + '/' +
173                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
174    if self.install_apk:
175      for apk in self.apks:
176        self.adb.ManagedInstall(apk.GetApkPath(),
177                                package_name=apk.GetPackageName())
178    self.tool.CopyFiles()
179    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
180
181  def SaveCoverageData(self, test):
182    """Saves the Emma coverage data before it's overwritten by the next test.
183
184    Args:
185      test: the test whose coverage data is collected.
186    """
187    if not self.coverage:
188      return
189    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
190                               constants.CHROME_DIR):
191      logging.error('ERROR: Unable to find file ' +
192                    TestRunner._COVERAGE_RESULT_PATH +
193                    ' on the device for test ' + test)
194    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
195                                        TestRunner._COVERAGE_FILENAME)
196    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
197      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
198             '-in', pulled_coverage_file,
199             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
200             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
201      cmd_helper.RunCmd(cmd)
202    else:
203      shutil.copy(pulled_coverage_file,
204                  TestRunner._COVERAGE_MERGED_FILENAME)
205    os.remove(pulled_coverage_file)
206
207  def GenerateCoverageReportIfNeeded(self):
208    """Uses the Emma to generate a coverage report and a html page."""
209    if not self.coverage:
210      return
211    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
212           'emma', 'report', '-r', 'html',
213           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
214           '-in', TestRunner._COVERAGE_META_INFO_PATH]
215    cmd_helper.RunCmd(cmd)
216    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
217                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
218    shutil.copytree('coverage', new_dir)
219
220    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
221                              'Latest_Coverage_Run')
222    if os.path.exists(latest_dir):
223      shutil.rmtree(latest_dir)
224    os.mkdir(latest_dir)
225    webserver_new_index = os.path.join(new_dir, 'index.html')
226    webserver_new_files = os.path.join(new_dir, '_files')
227    webserver_latest_index = os.path.join(latest_dir, 'index.html')
228    webserver_latest_files = os.path.join(latest_dir, '_files')
229    # Setup new softlinks to last result.
230    os.symlink(webserver_new_index, webserver_latest_index)
231    os.symlink(webserver_new_files, webserver_latest_files)
232    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])
233
234  def _GetInstrumentationArgs(self):
235    ret = {}
236    if self.coverage:
237      ret['coverage'] = 'true'
238    if self.wait_for_debugger:
239      ret['debug'] = 'true'
240    return ret
241
242  def _TakeScreenshot(self, test):
243    """Takes a screenshot from the device."""
244    screenshot_tool = os.path.join(constants.CHROME_DIR,
245        'third_party/android_tools/sdk/tools/monkeyrunner')
246    screenshot_script = os.path.join(constants.CHROME_DIR,
247        'build/android/monkeyrunner_screenshot.py')
248    screenshot_path = os.path.join(constants.CHROME_DIR,
249                                   'out_screenshots')
250    if not os.path.exists(screenshot_path):
251      os.mkdir(screenshot_path)
252    screenshot_name = os.path.join(screenshot_path, test + '.png')
253    logging.info('Taking screenshot named %s', screenshot_name)
254    cmd_helper.RunCmd([screenshot_tool, screenshot_script,
255                       '--serial', self.device,
256                       '--file', screenshot_name])
257
258  def SetUp(self):
259    """Sets up the test harness and device before all tests are run."""
260    super(TestRunner, self).SetUp()
261    if not self.adb.IsRootEnabled():
262      logging.warning('Unable to enable java asserts for %s, non rooted device',
263                      self.device)
264    else:
265      if self.adb.SetJavaAssertsEnabled(enable=True):
266        self.adb.Reboot(full_reboot=False)
267
268    # We give different default value to launch HTTP server based on shard index
269    # because it may have race condition when multiple processes are trying to
270    # launch lighttpd with same port at same time.
271    http_server_ports = self.LaunchTestHttpServer(
272        os.path.join(constants.CHROME_DIR),
273        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
274    if self.ports_to_forward:
275      port_pairs = [(port, port) for port in self.ports_to_forward]
276      # We need to remember which ports the HTTP server is using, since the
277      # forwarder will stomp on them otherwise.
278      port_pairs.append(http_server_ports)
279      self.forwarder = Forwarder(
280         self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
281    self.CopyTestFilesOnce()
282    self.flags.AddFlags(['--enable-test-intents'])
283
284  def TearDown(self):
285    """Cleans up the test harness and saves outstanding data from test run."""
286    if self.forwarder:
287      self.forwarder.Close()
288    self.GenerateCoverageReportIfNeeded()
289    super(TestRunner, self).TearDown()
290
291  def TestSetup(self, test):
292    """Sets up the test harness for running a particular test.
293
294    Args:
295      test: The name of the test that will be run.
296    """
297    self.SetupPerfMonitoringIfNeeded(test)
298    self._SetupIndividualTestTimeoutScale(test)
299    self.tool.SetupEnvironment()
300
301    # Make sure the forwarder is still running.
302    self.RestartHttpServerForwarderIfNecessary()
303
304  def _IsPerfTest(self, test):
305    """Determines whether a test is a performance test.
306
307    Args:
308      test: The name of the test to be checked.
309
310    Returns:
311      Whether the test is annotated as a performance test.
312    """
313    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)
314
315  def SetupPerfMonitoringIfNeeded(self, test):
316    """Sets up performance monitoring if the specified test requires it.
317
318    Args:
319      test: The name of the test to be run.
320    """
321    if not self._IsPerfTest(test):
322      return
323    self.adb.Adb().SendCommand('shell rm ' +
324                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
325    self.adb.StartMonitoringLogcat()
326
327  def TestTeardown(self, test, test_result):
328    """Cleans up the test harness after running a particular test.
329
330    Depending on the options of this TestRunner this might handle coverage
331    tracking or performance tracking.  This method will only be called if the
332    test passed.
333
334    Args:
335      test: The name of the test that was just run.
336      test_result: result for this test.
337    """
338
339    self.tool.CleanUpEnvironment()
340
341    # The logic below relies on the test passing.
342    if not test_result or test_result.GetStatusCode():
343      return
344
345    self.TearDownPerfMonitoring(test)
346    self.SaveCoverageData(test)
347
348  def TearDownPerfMonitoring(self, test):
349    """Cleans up performance monitoring if the specified test required it.
350
351    Args:
352      test: The name of the test that was just run.
353    Raises:
354      FatalTestException: if there's anything wrong with the perf data.
355    """
356    if not self._IsPerfTest(test):
357      return
358    raw_test_name = test.split('#')[1]
359
360    # Wait and grab annotation data so we can figure out which traces to parse
361    regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' +
362                                                raw_test_name +
363                                                '\)\:(.*)'), None)
364
365    # If the test is set to run on a specific device type only (IE: only
366    # tablet or phone) and it is being run on the wrong device, the test
367    # just quits and does not do anything.  The java test harness will still
368    # print the appropriate annotation for us, but will add --NORUN-- for
369    # us so we know to ignore the results.
370    # The --NORUN-- tag is managed by MainActivityTestBase.java
371    if regex.group(1) != '--NORUN--':
372
373      # Obtain the relevant perf data.  The data is dumped to a
374      # JSON formatted file.
375      json_string = self.adb.GetFileContents(
376          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')
377
378      if json_string:
379        json_string = '\n'.join(json_string)
380      else:
381        raise FatalTestException('Perf file does not exist or is empty')
382
383      if self.save_perf_json:
384        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
385        with open(json_local_file, 'w') as f:
386          f.write(json_string)
387        logging.info('Saving Perf UI JSON from test ' +
388                     test + ' to ' + json_local_file)
389
390      raw_perf_data = regex.group(1).split(';')
391
392      for raw_perf_set in raw_perf_data:
393        if raw_perf_set:
394          perf_set = raw_perf_set.split(',')
395          if len(perf_set) != 3:
396            raise FatalTestException('Unexpected number of tokens in '
397                                     'perf annotation string: ' + raw_perf_set)
398
399          # Process the performance data
400          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])
401
402          PrintPerfResult(perf_set[1], perf_set[2],
403                          [result['average']], result['units'])
404
405  def _SetupIndividualTestTimeoutScale(self, test):
406    timeout_scale = self._GetIndividualTestTimeoutScale(test)
407    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)
408
409  def _GetIndividualTestTimeoutScale(self, test):
410    """Returns the timeout scale for the given |test|."""
411    annotations = self.apks[0].GetTestAnnotations(test)
412    timeout_scale = 1
413    if 'TimeoutScale' in annotations:
414      for annotation in annotations:
415        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
416        if scale_match:
417          timeout_scale = int(scale_match.group(1))
418    if self.wait_for_debugger:
419      timeout_scale *= 100
420    return timeout_scale
421
422  def _GetIndividualTestTimeoutSecs(self, test):
423    """Returns the timeout in seconds for the given |test|."""
424    annotations = self.apks[0].GetTestAnnotations(test)
425    if 'Manual' in annotations:
426      return 600 * 60
427    if 'External' in annotations:
428      return 10 * 60
429    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
430      return 5 * 60
431    if 'MediumTest' in annotations:
432      return 3 * 60
433    return 1 * 60
434
435  def RunTests(self):
436    """Runs the tests, generating the coverage if needed.
437
438    Returns:
439      A TestResults object.
440    """
441    instrumentation_path = (self.instrumentation_class_path +
442                            '/android.test.InstrumentationTestRunner')
443    instrumentation_args = self._GetInstrumentationArgs()
444    for test in self._GetTestsIter():
445      test_result = None
446      start_date_ms = None
447      try:
448        self.TestSetup(test)
449        start_date_ms = int(time.time()) * 1000
450        args_with_filter = dict(instrumentation_args)
451        args_with_filter['class'] = test
452        # |test_results| is a list that should contain
453        # a single TestResult object.
454        logging.warn(args_with_filter)
455        (test_results, _) = self.adb.Adb().StartInstrumentation(
456            instrumentation_path=instrumentation_path,
457            instrumentation_args=args_with_filter,
458            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
459                          self._GetIndividualTestTimeoutScale(test) *
460                          self.tool.GetTimeoutScale()))
461        duration_ms = int(time.time()) * 1000 - start_date_ms
462        assert len(test_results) == 1
463        test_result = test_results[0]
464        status_code = test_result.GetStatusCode()
465        if status_code:
466          log = test_result.GetFailureReason()
467          if not log:
468            log = 'No information.'
469          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
470            self._TakeScreenshot(test)
471          self.test_results.failed += [SingleTestResult(test, start_date_ms,
472                                                        duration_ms, log)]
473        else:
474          result = [SingleTestResult(test, start_date_ms, duration_ms)]
475          self.test_results.ok += result
476      # Catch exceptions thrown by StartInstrumentation().
477      # See ../../third_party/android/testrunner/adb_interface.py
478      except (errors.WaitForResponseTimedOutError,
479              errors.DeviceUnresponsiveError,
480              errors.InstrumentationError), e:
481        if start_date_ms:
482          duration_ms = int(time.time()) * 1000 - start_date_ms
483        else:
484          start_date_ms = int(time.time()) * 1000
485          duration_ms = 0
486        message = str(e)
487        if not message:
488          message = 'No information.'
489        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
490                                                       duration_ms,
491                                                       message)]
492        test_result = None
493      self.TestTeardown(test, test_result)
494    return self.test_results
495
496
497class TestSharder(BaseTestSharder):
498  """Responsible for sharding the tests on the connected devices."""
499
500  def __init__(self, attached_devices, options, tests, apks):
501    BaseTestSharder.__init__(self, attached_devices)
502    self.options = options
503    self.tests = tests
504    self.apks = apks
505
506  def SetupSharding(self, tests):
507    """Called before starting the shards."""
508    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
509        len(self.attached_devices), tests))
510
511  def CreateShardedTestRunner(self, device, index):
512    """Creates a sharded test runner.
513
514    Args:
515      device: Device serial where this shard will run.
516      index: Index of this device in the pool.
517
518    Returns:
519      A TestRunner object.
520    """
521    return TestRunner(self.options, device, None, False, index, self.apks, [])
522
523
524def DispatchJavaTests(options, apks):
525  """Dispatches Java tests onto connected device(s).
526
527  If possible, this method will attempt to shard the tests to
528  all connected devices. Otherwise, dispatch and run tests on one device.
529
530  Args:
531    options: Command line options.
532    apks: list of APKs to use.
533
534  Returns:
535    A TestResults object holding the results of the Java tests.
536
537  Raises:
538    FatalTestException: when there's no attached the devices.
539  """
540  test_apk = apks[0]
541  if options.annotation:
542    available_tests = test_apk.GetAnnotatedTests(options.annotation)
543    if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
544      tests_without_annotation = [
545          m for m in
546          test_apk.GetTestMethods()
547          if not test_apk.GetTestAnnotations(m) and
548          not apk_info.ApkInfo.IsPythonDrivenTest(m)]
549      if tests_without_annotation:
550        tests_without_annotation.sort()
551        logging.warning('The following tests do not contain any annotation. '
552                        'Assuming "SmallTest":\n%s',
553                        '\n'.join(tests_without_annotation))
554        available_tests += tests_without_annotation
555  else:
556    available_tests = [m for m in test_apk.GetTestMethods()
557                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
558  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'
559
560  tests = []
561  if options.test_filter:
562    # |available_tests| are in adb instrument format: package.path.class#test.
563    filter_without_hash = options.test_filter.replace('#', '.')
564    tests = [t for t in available_tests
565             if filter_without_hash in t.replace('#', '.')]
566  else:
567    tests = available_tests
568
569  if not tests:
570    logging.warning('No Java tests to run with current args.')
571    return TestResults()
572
573  tests *= options.number_of_runs
574
575  attached_devices = android_commands.GetAttachedDevices()
576  test_results = TestResults()
577
578  if not attached_devices:
579    raise FatalTestException('You have no devices attached or visible!')
580  if options.device:
581    attached_devices = [options.device]
582
583  logging.info('Will run: %s', str(tests))
584
585  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
586    logging.warning('Coverage / debugger can not be sharded, '
587                    'using first available device')
588    attached_devices = attached_devices[:1]
589  sharder = TestSharder(attached_devices, options, tests, apks)
590  test_results = sharder.RunShardedTests()
591  return test_results
592