# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import contextlib
import copy
import hashlib
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import time

from devil import base_error
from devil.android import crash_handler
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import flag_changer
from devil.android import logcat_monitor
from devil.android.sdk import shared_prefs
from devil.android.tools import system_app
from devil.android.tools import webview_app
from devil.utils import reraiser_thread
from incremental_install import installer
from pylib import constants
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import output_manager
from pylib.constants import host_paths
from pylib.instrumentation import instrumentation_test_instance
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.output import remote_output_manager
from pylib.utils import chrome_proxy_utils
from pylib.utils import gold_utils
from pylib.utils import instrumentation_tracing
from pylib.utils import shared_preference_utils
from py_trace_event import trace_event
from py_trace_event import trace_time
from py_utils import contextlib_ext
from py_utils import tempfile_ext
import tombstones

with host_paths.SysPath(
    os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'), 0):
  import jinja2  # pylint: disable=import-error
  import markupsafe  # pylint: disable=import-error,unused-import


_JINJA_TEMPLATE_DIR = os.path.join(
    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation')
_JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja'

_WPR_GO_LINUX_X86_64_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT,
                                         'third_party', 'webpagereplay', 'bin',
                                         'linux', 'x86_64', 'wpr')

_TAG = 'test_runner_py'

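# Seconds allotted to a test based on its size annotation. Checked in order;
# the first annotation present on a test determines its timeout (see
# _GetTimeoutFromAnnotations).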
TIMEOUT_ANNOTATIONS = [
  ('Manual', 10 * 60 * 60),
  ('IntegrationTest', 30 * 60),
  ('External', 10 * 60),
  ('EnormousTest', 10 * 60),
  ('LargeTest', 5 * 60),
  ('MediumTest', 3 * 60),
  ('SmallTest', 1 * 60),
]

LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v', 'DEBUG:I',
                  'StrictMode:D', '%s:I' % _TAG]

EXTRA_SCREENSHOT_FILE = (
    'org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile')

EXTRA_UI_CAPTURE_DIR = (
    'org.chromium.base.test.util.Screenshooter.ScreenshotDir')

EXTRA_TRACE_FILE = ('org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile')

_EXTRA_TEST_LIST = (
    'org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList')

_EXTRA_PACKAGE_UNDER_TEST = ('org.chromium.chrome.test.pagecontroller.rules.'
                             'ChromeUiApplicationTestRule.PackageUnderTest')

FEATURE_ANNOTATION = 'Feature'
RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest'
WPR_ARCHIVE_FILE_PATH_ANNOTATION = 'WPRArchiveDirectory'
WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION = 'WPRRecordReplayTest'

_DEVICE_GOLD_DIR = 'skia_gold'
# A map of Android product models to SDK ints.
RENDER_TEST_MODEL_SDK_CONFIGS = {
    'Nexus 5X': [23],
}
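# For example, a (hypothetical) entry such as 'Pixel 2': [26, 28] would mark
# Pixel 2 devices on SDK 26 or 28 as supported render test configurations;
# Gold failures on model/SDK combinations not listed here are ignored on the
# bots (see _ProcessSkiaGoldRenderTestResults).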

_TEST_BATCH_MAX_GROUP_SIZE = 256


@contextlib.contextmanager
def _LogTestEndpoints(device, test_name):
  device.RunShellCommand(
      ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
      check_return=True)
  try:
    yield
  finally:
    device.RunShellCommand(
        ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
        check_return=True)


# TODO(jbudorick): Make this private once the instrumentation test_runner
# is deprecated.
def DidPackageCrashOnDevice(package_name, device):
  # Dismiss any error dialogs. Limit the number in case we have an error
  # loop or we are failing to dismiss.
  try:
    for _ in xrange(10):
      package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1)
      if not package:
        return False
      # Assume test package convention of ".test" suffix
      if package in package_name:
        return True
  except device_errors.CommandFailedError:
    logging.exception('Error while attempting to dismiss crash dialog.')
  return False


_CURRENT_FOCUS_CRASH_RE = re.compile(
    r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')


def _GetTargetPackageName(test_apk):
  # apk_under_test does not work for smoke tests, where it is set to an
  # apk that is not listed as the targetPackage in the test apk's manifest.
  return test_apk.GetAllInstrumentations()[0]['android:targetPackage']


class LocalDeviceInstrumentationTestRun(
    local_device_test_run.LocalDeviceTestRun):
  def __init__(self, env, test_instance):
    super(LocalDeviceInstrumentationTestRun, self).__init__(
        env, test_instance)
    self._chrome_proxy = None
    self._context_managers = collections.defaultdict(list)
    self._flag_changers = {}
    self._render_tests_device_output_dir = None
    self._shared_prefs_to_restore = []
    self._skia_gold_session_manager = None
    self._skia_gold_work_dir = None

  #override
  def TestPackage(self):
    return self._test_instance.suite

  #override
  def SetUp(self):
    target_package = _GetTargetPackageName(self._test_instance.test_apk)

    @local_device_environment.handle_shard_failures_with(
        self._env.DenylistDevice)
    @trace_event.traced
    def individual_device_set_up(device, host_device_tuples):
      steps = []

      if self._test_instance.replace_system_package:
        @trace_event.traced
        def replace_package(dev):
          # We need the context manager to be applied before modifying any
          # shared preference files in case the replacement APK needs to be
          # set up, and it needs to be applied while the test is running.
          # Thus, it needs to be applied early during setup, but must still be
          # applied during _RunTest, which isn't possible using 'with' without
          # applying the context manager up in test_runner. Instead, we
          # manually invoke its __enter__ and __exit__ methods in setup and
          # teardown.
          system_app_context = system_app.ReplaceSystemApp(
              dev, self._test_instance.replace_system_package.package,
              self._test_instance.replace_system_package.replacement_apk)
          # Pylint is not smart enough to realize that this field has
          # an __enter__ method, and will complain loudly.
          # pylint: disable=no-member
          system_app_context.__enter__()
          # pylint: enable=no-member
          self._context_managers[str(dev)].append(system_app_context)

        steps.append(replace_package)

      if self._test_instance.system_packages_to_remove:

        @trace_event.traced
        def remove_packages(dev):
          logging.info('Attempting to remove system packages %s',
                       self._test_instance.system_packages_to_remove)
          system_app.RemoveSystemApps(
              dev, self._test_instance.system_packages_to_remove)
          logging.info('Done removing system packages')

        # This should be at the front in case we're removing the package to
        # make room for another APK installation later on. Since we disallow
        # concurrent adb with this option specified, this should be safe.
        steps.insert(0, remove_packages)

      if self._test_instance.use_webview_provider:
        @trace_event.traced
        def use_webview_provider(dev):
          # We need the context manager to be applied before modifying any
          # shared preference files in case the replacement APK needs to be
          # set up, and it needs to be applied while the test is running.
          # Thus, it needs to be applied early during setup, but must still be
          # applied during _RunTest, which isn't possible using 'with' without
          # applying the context manager up in test_runner. Instead, we
          # manually invoke its __enter__ and __exit__ methods in setup and
          # teardown.
          webview_context = webview_app.UseWebViewProvider(
              dev, self._test_instance.use_webview_provider)
          # Pylint is not smart enough to realize that this field has
          # an __enter__ method, and will complain loudly.
          # pylint: disable=no-member
          webview_context.__enter__()
          # pylint: enable=no-member
          self._context_managers[str(dev)].append(webview_context)

        steps.append(use_webview_provider)

      def install_helper(apk,
                         modules=None,
                         fake_modules=None,
                         permissions=None,
                         additional_locales=None):

        @instrumentation_tracing.no_tracing
        @trace_event.traced
        def install_helper_internal(d, apk_path=None):
          # pylint: disable=unused-argument
          d.Install(apk,
                    modules=modules,
                    fake_modules=fake_modules,
                    permissions=permissions,
                    additional_locales=additional_locales)

        return install_helper_internal

      def incremental_install_helper(apk, json_path, permissions):

        @trace_event.traced
        def incremental_install_helper_internal(d, apk_path=None):
          # pylint: disable=unused-argument
          installer.Install(d, json_path, apk=apk, permissions=permissions)
        return incremental_install_helper_internal

      permissions = self._test_instance.test_apk.GetPermissions()
      if self._test_instance.test_apk_incremental_install_json:
        steps.append(incremental_install_helper(
                         self._test_instance.test_apk,
                         self._test_instance.
                             test_apk_incremental_install_json,
                         permissions))
      else:
        steps.append(
            install_helper(
                self._test_instance.test_apk, permissions=permissions))

      steps.extend(
          install_helper(apk) for apk in self._test_instance.additional_apks)

      # The apk under test needs to be installed last since installing other
      # apks after will unintentionally clear the fake module directory.
      # TODO(wnwen): Make this more robust, fix crbug.com/1010954.
      if self._test_instance.apk_under_test:
        permissions = self._test_instance.apk_under_test.GetPermissions()
        if self._test_instance.apk_under_test_incremental_install_json:
          steps.append(
              incremental_install_helper(
                  self._test_instance.apk_under_test,
                  self._test_instance.apk_under_test_incremental_install_json,
                  permissions))
        else:
          steps.append(
              install_helper(self._test_instance.apk_under_test,
                             self._test_instance.modules,
                             self._test_instance.fake_modules, permissions,
                             self._test_instance.additional_locales))

      @trace_event.traced
      def set_debug_app(dev):
        # Set debug app in order to enable reading command line flags on user
        # builds
        cmd = ['am', 'set-debug-app', '--persistent']
        if self._test_instance.wait_for_java_debugger:
          cmd.append('-w')
        cmd.append(target_package)
        dev.RunShellCommand(cmd, check_return=True)

      @trace_event.traced
      def edit_shared_prefs(dev):
        for setting in self._test_instance.edit_shared_prefs:
          shared_pref = shared_prefs.SharedPrefs(
              dev, setting['package'], setting['filename'],
              use_encrypted_path=setting.get('supports_encrypted_path', False))
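          # Save a copy of the current state of the file so that it can be
          # restored to its pre-test values during teardown.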
          pref_to_restore = copy.copy(shared_pref)
          pref_to_restore.Load()
          self._shared_prefs_to_restore.append(pref_to_restore)

          shared_preference_utils.ApplySharedPreferenceSetting(
              shared_pref, setting)

      @trace_event.traced
      def set_vega_permissions(dev):
        # Normally, installation of VrCore automatically grants storage
        # permissions. However, since VrCore is part of the system image on
        # the Vega standalone headset, we don't install the APK as part of test
        # setup. Instead, grant the permissions here so that it can take
        # screenshots.
        if dev.product_name == 'vega':
          dev.GrantPermissions('com.google.vr.vrcore', [
              'android.permission.WRITE_EXTERNAL_STORAGE',
              'android.permission.READ_EXTERNAL_STORAGE'
          ])

      @instrumentation_tracing.no_tracing
      def push_test_data(dev):
        device_root = posixpath.join(dev.GetExternalStoragePath(),
                                     'chromium_tests_root')
        host_device_tuples_substituted = [
            (h, local_device_test_run.SubstituteDeviceRoot(d, device_root))
            for h, d in host_device_tuples]
        logging.info('Pushing data dependencies.')
        for h, d in host_device_tuples_substituted:
          logging.debug('  %r -> %r', h, d)
        local_device_environment.place_nomedia_on_device(dev, device_root)
        dev.PushChangedFiles(host_device_tuples_substituted,
                             delete_device_stale=True)
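        # With no data dependencies, the push above is a no-op, so explicitly
        # recreate an empty root to clear out any stale files.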
        if not host_device_tuples_substituted:
          dev.RunShellCommand(['rm', '-rf', device_root], check_return=True)
          dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)

      @trace_event.traced
      def create_flag_changer(dev):
        if self._test_instance.flags:
          self._CreateFlagChangerIfNeeded(dev)
          logging.debug('Attempting to set flags: %r',
                        self._test_instance.flags)
          self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)

        valgrind_tools.SetChromeTimeoutScale(
            dev, self._test_instance.timeout_scale)

      steps += [
          set_debug_app, edit_shared_prefs, push_test_data, create_flag_changer,
          set_vega_permissions
      ]

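      # Wrap each step in a helper function so that the returned lambda
      # captures the current |step| and |dev| values; creating the lambda
      # directly in the loop below would late-bind the loop variable.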
      def bind_crash_handler(step, dev):
        return lambda: crash_handler.RetryOnSystemCrash(step, dev)

      steps = [bind_crash_handler(s, device) for s in steps]

      try:
        if self._env.concurrent_adb:
          reraiser_thread.RunAsync(steps)
        else:
          for step in steps:
            step()
        if self._test_instance.store_tombstones:
          tombstones.ClearAllTombstones(device)
      except device_errors.CommandFailedError:
        if not device.IsOnline():
          raise

        # A bugreport can be large and take a while to generate, so only
        # capture one if we're using a remote manager.
        if isinstance(
            self._env.output_manager,
            remote_output_manager.RemoteOutputManager):
          logging.error(
              'Error when setting up device for tests. Taking a bugreport for '
              'investigation. This may take a while...')
          report_name = '%s.bugreport' % device.serial
          with self._env.output_manager.ArchivedTempfile(
              report_name, 'bug_reports') as report_file:
            device.TakeBugReport(report_file.name)
          logging.error('Bug report saved to %s', report_file.Link())
        raise

    self._env.parallel_devices.pMap(
        individual_device_set_up,
        self._test_instance.GetDataDependencies())
    # Created here instead of on a per-test basis so that the downloaded
    # expectations can be re-used between tests, saving a significant amount
    # of time.
    self._skia_gold_work_dir = tempfile.mkdtemp()
    self._skia_gold_session_manager = gold_utils.AndroidSkiaGoldSessionManager(
        self._skia_gold_work_dir, self._test_instance.skia_gold_properties)
    if self._test_instance.wait_for_java_debugger:
      logging.warning('*' * 80)
      logging.warning('Waiting for debugger to attach to process: %s',
                      target_package)
      logging.warning('*' * 80)

  #override
  def TearDown(self):
    shutil.rmtree(self._skia_gold_work_dir)
    self._skia_gold_work_dir = None
    self._skia_gold_session_manager = None
    # By default, teardown will invoke ADB. When receiving SIGTERM due to a
    # timeout, there's a high probability that ADB is non-responsive. In these
    # cases, sending an ADB command will potentially take a long time to time
    # out. Before this happens, the process will be hard-killed for not
    # responding to SIGTERM fast enough.
    if self._received_sigterm:
      return

    @local_device_environment.handle_shard_failures_with(
        self._env.DenylistDevice)
    @trace_event.traced
    def individual_device_tear_down(dev):
      if str(dev) in self._flag_changers:
        self._flag_changers[str(dev)].Restore()

      # Remove package-specific configuration
      dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True)

      valgrind_tools.SetChromeTimeoutScale(dev, None)

      # Restore any shared preference files that we stored during setup.
      # This should be run sometime before the replace package contextmanager
      # gets exited so we don't have to special case restoring files of
      # replaced system apps.
      for pref_to_restore in self._shared_prefs_to_restore:
        pref_to_restore.Commit(force_commit=True)

      # Context manager exit handlers are applied in reverse order
      # of the enter handlers.
      for context in reversed(self._context_managers[str(dev)]):
        # See pylint-related comment above with __enter__()
        # pylint: disable=no-member
        context.__exit__(*sys.exc_info())
        # pylint: enable=no-member

    self._env.parallel_devices.pMap(individual_device_tear_down)

  def _CreateFlagChangerIfNeeded(self, device):
    if str(device) not in self._flag_changers:
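      # Flags are written to a generic command-line file unless the test
      # explicitly opts into the apk-under-test's flags file below.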
      cmdline_file = 'test-cmdline-file'
      if self._test_instance.use_apk_under_test_flags_file:
        if self._test_instance.package_info:
          cmdline_file = self._test_instance.package_info.cmdline_file
        else:
          raise Exception('No PackageInfo found but '
                          '--use-apk-under-test-flags-file is specified.')
      self._flag_changers[str(device)] = flag_changer.FlagChanger(
          device, cmdline_file)

  #override
  def _CreateShards(self, tests):
    return tests

  #override
  def _GetTests(self):
    if self._test_instance.junit4_runner_supports_listing:
      raw_tests = self._GetTestsFromRunner()
      tests = self._test_instance.ProcessRawTests(raw_tests)
    else:
      tests = self._test_instance.GetTests()
    tests = self._ApplyExternalSharding(
        tests, self._test_instance.external_shard_index,
        self._test_instance.total_external_shards)
    return tests

  #override
  def _GroupTests(self, tests):
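    """Groups tests by their Batch annotation value.

    Tests without a Batch annotation, or annotated with RequiresRestart, run
    individually. The rest are grouped by batch name, defaulting to the test
    class, and each group is split into chunks of at most
    _TEST_BATCH_MAX_GROUP_SIZE tests.
    """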
    batched_tests = dict()
    other_tests = []
    for test in tests:
      if 'Batch' in test['annotations'] and 'RequiresRestart' not in test[
          'annotations']:
        batch_name = test['annotations']['Batch']['value']
        if not batch_name:
          batch_name = test['class']
        if batch_name not in batched_tests:
          batched_tests[batch_name] = []
        batched_tests[batch_name].append(test)
      else:
        other_tests.append(test)

    all_tests = []
    for _, tests in batched_tests.items():
      tests.sort()  # Ensure a consistent ordering across external shards.
      all_tests.extend([
          tests[i:i + _TEST_BATCH_MAX_GROUP_SIZE]
          for i in range(0, len(tests), _TEST_BATCH_MAX_GROUP_SIZE)
      ])
    all_tests.extend(other_tests)
    return all_tests

  #override
  def _GetUniqueTestName(self, test):
    return instrumentation_test_instance.GetUniqueTestName(test)

  #override
  def _RunTest(self, device, test):
    extras = {}

    # Provide package name under test for apk_under_test.
    if self._test_instance.apk_under_test:
      package_name = self._test_instance.apk_under_test.GetPackageName()
      extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

    flags_to_add = []
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.exec' % (
          '%s_%s_group' % (test[0]['class'], test[0]['method']) if isinstance(
              test, list) else '%s_%s' % (test['class'], test['method']))
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      if not device.PathExists(coverage_directory):
        device.RunShellCommand(['mkdir', '-p', coverage_directory],
                               check_return=True)
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file
    # Save screenshot if screenshot dir is specified (save locally) or if
    # a GS bucket is passed (save in cloud).
    screenshot_device_file = device_temp_file.DeviceTempFile(
        device.adb, suffix='.png', dir=device.GetExternalStoragePath())
    extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

    # Set up the screenshot directory. This needs to be done for each test so
    # that we only get screenshots created by that test. It has to be on
    # external storage since the default location doesn't allow file creation
    # from the instrumentation test app on Android L and M.
    ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
        device.adb,
        dir=device.GetExternalStoragePath())
    extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

    if self._env.trace_output:
      trace_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.json', dir=device.GetExternalStoragePath())
      extras[EXTRA_TRACE_FILE] = trace_device_file.name

    target = '%s/%s' % (self._test_instance.test_package,
                        self._test_instance.junit4_runner_class)
    if isinstance(test, list):

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = instrumentation_test_instance.GetTestName(test[0]) + '_batch'
      extras['class'] = ','.join(test_names)
      test_display_name = test_name
      timeout = sum(timeouts)
    else:
      assert test['is_junit4']
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)

      extras['class'] = test_name
      if 'flags' in test and test['flags']:
        flags_to_add.extend(test['flags'])
      timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    if self._test_instance.wait_for_java_debugger:
      timeout = None
    logging.info('preparing to run %s: %s', test_display_name, test)

    if _IsRenderTest(test):
      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
      self._render_tests_device_output_dir = posixpath.join(
          device.GetExternalStoragePath(), 'render_test_output_dir')
      flags_to_add.append('--render-test-output-dir=%s' %
                          self._render_tests_device_output_dir)

    if _IsWPRRecordReplayTest(test):
      wpr_archive_relative_path = _GetWPRArchivePath(test)
      if not wpr_archive_relative_path:
        raise RuntimeError('Could not find the WPR archive file path '
                           'from annotation.')
      wpr_archive_path = os.path.join(host_paths.DIR_SOURCE_ROOT,
                                      wpr_archive_relative_path)
      if not os.path.isdir(wpr_archive_path):
        raise RuntimeError('WPRArchiveDirectory annotation should point '
                           'to a directory only. '
                           '{0} exists: {1}'.format(
                               wpr_archive_path,
                               os.path.exists(wpr_archive_path)))

      # Some Linux versions do not like '#' in file names, so replace it
      # with '__'.
      archive_path = os.path.join(
          wpr_archive_path,
          _ReplaceUncommonChars(self._GetUniqueTestName(test)) + '.wprgo')

      if not os.path.exists(_WPR_GO_LINUX_X86_64_PATH):
        # If we got to this stage, then we should have
        # checkout_android set.
        raise RuntimeError(
            'WPR Go binary not found at {}'.format(_WPR_GO_LINUX_X86_64_PATH))
      # Tells the server to use the binaries retrieved from CIPD.
      chrome_proxy_utils.ChromeProxySession.SetWPRServerBinary(
          _WPR_GO_LINUX_X86_64_PATH)
      self._chrome_proxy = chrome_proxy_utils.ChromeProxySession()
      self._chrome_proxy.wpr_record_mode = self._test_instance.wpr_record_mode
      self._chrome_proxy.Start(device, archive_path)
      flags_to_add.extend(self._chrome_proxy.GetFlags())

    if flags_to_add:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(add=flags_to_add)

    time_ms = lambda: int(time.time() * 1e3)
    start_ms = time_ms()

    with ui_capture_dir:
      with self._ArchiveLogcat(device, test_name) as logcat_file:
        output = device.StartInstrumentation(
            target, raw=True, extras=extras, timeout=timeout, retries=0)

      duration_ms = time_ms() - start_ms

      with contextlib_ext.Optional(
          trace_event.trace('ProcessResults'),
          self._env.trace_output):
        output = self._test_instance.MaybeDeobfuscateLines(output)
        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, duration_ms,
            device.product_cpu_abi, self._test_instance.symbolizer)

      if self._env.trace_output:
        self._SaveTraceData(trace_device_file, device, test['class'])

      def restore_flags():
        if flags_to_add:
          self._flag_changers[str(device)].Restore()

      def restore_timeout_scale():
        if test_timeout_scale:
          valgrind_tools.SetChromeTimeoutScale(
              device, self._test_instance.timeout_scale)

      def handle_coverage_data():
        if self._test_instance.coverage_directory:
          try:
            if not os.path.exists(self._test_instance.coverage_directory):
              os.makedirs(self._test_instance.coverage_directory)
            device.PullFile(coverage_device_file,
                            self._test_instance.coverage_directory)
            device.RemovePath(coverage_device_file, True)
          except (OSError, base_error.BaseError) as e:
            logging.warning('Failed to handle coverage data after tests: %s', e)

      def handle_render_test_data():
        if _IsRenderTest(test):
          # Render tests do not cause test failure by default. So we have to
          # check to see if any failure images were generated even if the test
          # does not fail.
          try:
            self._ProcessRenderTestResults(device, results)
          finally:
            device.RemovePath(self._render_tests_device_output_dir,
                              recursive=True,
                              force=True)
            self._render_tests_device_output_dir = None

      def pull_ui_screen_captures():
        screenshots = []
        for filename in device.ListDirectory(ui_capture_dir.name):
          if filename.endswith('.json'):
            screenshots.append(pull_ui_screenshot(filename))
        if screenshots:
          json_archive_name = 'ui_capture_%s_%s.json' % (
              test_name.replace('#', '.'),
              time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
          with self._env.output_manager.ArchivedTempfile(
              json_archive_name, 'ui_capture', output_manager.Datatype.JSON
              ) as json_archive:
            json.dump(screenshots, json_archive)
          _SetLinkOnResults(results, 'ui screenshot', json_archive.Link())

      def pull_ui_screenshot(filename):
        source_dir = ui_capture_dir.name
        json_path = posixpath.join(source_dir, filename)
        json_data = json.loads(device.ReadFile(json_path))
        image_file_path = posixpath.join(source_dir, json_data['location'])
        with self._env.output_manager.ArchivedTempfile(
            json_data['location'], 'ui_capture', output_manager.Datatype.PNG
            ) as image_archive:
          device.PullFile(image_file_path, image_archive.name)
        json_data['image_link'] = image_archive.Link()
        return json_data

      def stop_chrome_proxy():
        # Removes the port forwarding
        if self._chrome_proxy:
          self._chrome_proxy.Stop(device)
          if not self._chrome_proxy.wpr_replay_mode:
            logging.info('WPR Record test generated archive file %s',
                         self._chrome_proxy.wpr_archive_path)
          self._chrome_proxy = None

      # While constructing the TestResult objects, we can parallelize several
      # steps that involve ADB. These steps should NOT depend on any info in
      # the results! Things such as whether the test CRASHED have not yet been
      # determined.
      post_test_steps = [
          restore_flags, restore_timeout_scale, stop_chrome_proxy,
          handle_coverage_data, handle_render_test_data, pull_ui_screen_captures
      ]
      if self._env.concurrent_adb:
        reraiser_thread.RunAsync(post_test_steps)
      else:
        for step in post_test_steps:
          step()

    if logcat_file:
      _SetLinkOnResults(results, 'logcat', logcat_file.Link())

    # Update the result name if the test used flags.
    if flags_to_add:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    try:
      if DidPackageCrashOnDevice(self._test_instance.test_package, device):
        for r in results:
          if r.GetType() == base_test_result.ResultType.UNKNOWN:
            r.SetType(base_test_result.ResultType.CRASH)
    except device_errors.CommandTimeoutError:
      logging.warning('timed out when detecting/dismissing error dialogs')
      # Attach screenshot to the test to help with debugging the dialog boxes.
      self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                           results, 'dialog_box_screenshot')

    # The crash result can be set above or in
    # InstrumentationTestRun.GenerateTestResults. If a test crashes,
    # subprocesses such as the one used by EmbeddedTestServerRule can be left
    # alive in a bad state, so kill them now.
    for r in results:
      if r.GetType() == base_test_result.ResultType.CRASH:
        for apk in self._test_instance.additional_apks:
          device.ForceStop(apk.GetPackageName())

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                           results, 'post_test_screenshot')

      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)
    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)

    if self._test_instance.store_tombstones:
      resolved_tombstones = tombstones.ResolveTombstones(
          device,
          resolve_all_tombstones=True,
          include_stack_symbols=False,
          wipe_tombstones=True,
          tombstone_symbolizer=self._test_instance.symbolizer)
      if resolved_tombstones:
        tombstone_filename = 'tombstones_%s_%s' % (time.strftime(
            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
        with self._env.output_manager.ArchivedTempfile(
            tombstone_filename, 'tombstones') as tombstone_file:
          tombstone_file.write('\n'.join(resolved_tombstones))

        # Associate tombstones with first crashing test.
        for result in results:
          if result.GetType() == base_test_result.ResultType.CRASH:
            result.SetLink('tombstones', tombstone_file.Link())
            break
        else:
          # We don't always detect crashes correctly. In this case,
          # associate with the first test.
          results[0].SetLink('tombstones', tombstone_file.Link())
    return results, None

  def _GetTestsFromRunner(self):
    test_apk_path = self._test_instance.test_apk.path
    pickle_path = '%s-runner.pickle' % test_apk_path
    # For incremental APKs, the code doesn't live in the apk, so instead check
    # the timestamps of the dex files the incremental install references.
    if self._test_instance.test_apk_incremental_install_json:
      with open(self._test_instance.test_apk_incremental_install_json) as f:
        data = json.load(f)
      out_dir = constants.GetOutDirectory()
      test_mtime = max(
          os.path.getmtime(os.path.join(out_dir, p)) for p in data['dex_files'])
    else:
      test_mtime = os.path.getmtime(test_apk_path)

    try:
      return instrumentation_test_instance.GetTestsFromPickle(
          pickle_path, test_mtime)
    except instrumentation_test_instance.TestListPickleException as e:
      logging.info('Could not get tests from pickle: %s', e)
    logging.info('Getting tests by having %s list them.',
                 self._test_instance.junit4_runner_class)
    def list_tests(d):
      def _run(dev):
        with device_temp_file.DeviceTempFile(
            dev.adb, suffix='.json',
            dir=dev.GetExternalStoragePath()) as dev_test_list_json:
          junit4_runner_class = self._test_instance.junit4_runner_class
          test_package = self._test_instance.test_package
          extras = {
            'log': 'true',
            # Workaround for https://github.com/mockito/mockito/issues/922
            'notPackage': 'net.bytebuddy',
          }
          extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
          target = '%s/%s' % (test_package, junit4_runner_class)
          timeout = 240
          if self._test_instance.wait_for_java_debugger:
            timeout = None
          with self._ArchiveLogcat(dev, 'list_tests'):
            test_list_run_output = dev.StartInstrumentation(
                target, extras=extras, retries=0, timeout=timeout)
          if any(test_list_run_output):
            logging.error('Unexpected output while listing tests:')
            for line in test_list_run_output:
              logging.error('  %s', line)
          with tempfile_ext.NamedTemporaryDirectory() as host_dir:
            host_file = os.path.join(host_dir, 'list_tests.json')
            dev.PullFile(dev_test_list_json.name, host_file)
            with open(host_file, 'r') as host_file:
              return json.load(host_file)

      return crash_handler.RetryOnSystemCrash(_run, d)

    raw_test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)

    # If all devices failed to list tests, raise an exception.
    # Check that tl is not None and is not empty.
    if all(not tl for tl in raw_test_lists):
      raise device_errors.CommandFailedError(
          'Failed to list tests on any device')

    # Get the first viable list of raw tests
    raw_tests = [tl for tl in raw_test_lists if tl][0]

    instrumentation_test_instance.SaveTestsToPickle(pickle_path, raw_tests)
    return raw_tests

  @contextlib.contextmanager
  def _ArchiveLogcat(self, device, test_name):
    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)

    logcat_file = None
    logmon = None
    try:
      with self._env.output_manager.ArchivedTempfile(
          stream_name, 'logcat') as logcat_file:
        with logcat_monitor.LogcatMonitor(
            device.adb,
            filter_specs=local_device_environment.LOGCAT_FILTERS,
            output_file=logcat_file.name,
            transform_func=self._test_instance.MaybeDeobfuscateLines,
            check_error=False) as logmon:
          with _LogTestEndpoints(device, test_name):
            with contextlib_ext.Optional(
                trace_event.trace(test_name),
                self._env.trace_output):
              yield logcat_file
    finally:
      if logmon:
        logmon.Close()
      if logcat_file and logcat_file.Link():
        logging.info('Logcat saved to %s', logcat_file.Link())

  def _SaveTraceData(self, trace_device_file, device, test_class):
    trace_host_file = self._env.trace_output

    if device.FileExists(trace_device_file.name):
      try:
        java_trace_json = device.ReadFile(trace_device_file.name)
      except IOError:
        raise Exception('error pulling trace file from device')
      finally:
        trace_device_file.close()

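      # Derive a stable synthetic pid from the test class and device serial
      # so that all events from this run share one process track in the
      # trace viewer.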
      process_name = '%s (device %s)' % (test_class, device.serial)
      process_hash = int(hashlib.md5(process_name).hexdigest()[:6], 16)

      java_trace = json.loads(java_trace_json)
      java_trace.sort(key=lambda event: event['ts'])

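      # Compute the offset between the device clock (EPOCHREALTIME, seconds
      # since the epoch) and the host's trace clock so that device event
      # timestamps can be shifted onto the host timeline below.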
      get_date_command = 'echo $EPOCHREALTIME'
      device_time = device.RunShellCommand(get_date_command, single_line=True)
      device_time = float(device_time) * 1e6
      system_time = trace_time.Now()
      time_difference = system_time - device_time

      threads_to_add = set()
      for event in java_trace:
        # Ensure thread ID and thread name will be linked in the metadata.
        threads_to_add.add((event['tid'], event['name']))

        event['pid'] = process_hash

        # Adjust time stamp to align with Python trace times (from
        # trace_time.Now()).
        event['ts'] += time_difference

      for tid, thread_name in threads_to_add:
        thread_name_metadata = {'pid': process_hash, 'tid': tid,
                                'ts': 0, 'ph': 'M', 'cat': '__metadata',
                                'name': 'thread_name',
                                'args': {'name': thread_name}}
        java_trace.append(thread_name_metadata)

      process_name_metadata = {'pid': process_hash, 'tid': 0, 'ts': 0,
                               'ph': 'M', 'cat': '__metadata',
                               'name': 'process_name',
                               'args': {'name': process_name}}
      java_trace.append(process_name_metadata)

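      # The host trace file is kept as an unterminated JSON array so that
      # more events can be appended later: strip this chunk's closing ']',
      # and if the file already has content, also drop the opening '[' and
      # join with a comma.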
      java_trace_json = json.dumps(java_trace)
      java_trace_json = java_trace_json.rstrip(' ]')

      with open(trace_host_file, 'r') as host_handle:
        host_contents = host_handle.readline()

      if host_contents:
        java_trace_json = ',%s' % java_trace_json.lstrip(' [')

      with open(trace_host_file, 'a') as host_handle:
        host_handle.write(java_trace_json)

  def _SaveScreenshot(self, device, screenshot_device_file, test_name, results,
                      link_name):
    screenshot_filename = '%s-%s.png' % (
        test_name, time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
    if device.FileExists(screenshot_device_file.name):
      with self._env.output_manager.ArchivedTempfile(
          screenshot_filename, 'screenshot',
          output_manager.Datatype.PNG) as screenshot_host_file:
        try:
          device.PullFile(screenshot_device_file.name,
                          screenshot_host_file.name)
        finally:
          screenshot_device_file.close()
      _SetLinkOnResults(results, link_name, screenshot_host_file.Link())

  def _ProcessRenderTestResults(self, device, results):
    if not self._render_tests_device_output_dir:
      return
    self._ProcessSkiaGoldRenderTestResults(device, results)

  def _ProcessSkiaGoldRenderTestResults(self, device, results):
    gold_dir = posixpath.join(self._render_tests_device_output_dir,
                              _DEVICE_GOLD_DIR)
    if not device.FileExists(gold_dir):
      return

    gold_properties = self._test_instance.skia_gold_properties
    with tempfile_ext.NamedTemporaryDirectory() as host_dir:
      use_luci = not (gold_properties.local_pixel_tests
                      or gold_properties.no_luci_auth)

      # Pull everything at once instead of pulling individually, as it's
      # slightly faster since each command over adb has some overhead compared
      # to doing the same thing locally.
      host_dir = os.path.join(host_dir, _DEVICE_GOLD_DIR)
      device.PullFile(gold_dir, host_dir)
      for image_name in os.listdir(host_dir):
        if not image_name.endswith('.png'):
          continue

        render_name = image_name[:-4]
        json_name = render_name + '.json'
        json_path = os.path.join(host_dir, json_name)
        image_path = os.path.join(host_dir, image_name)
        if not os.path.exists(json_path):
          _FailTestIfNecessary(results)
          _AppendToLog(
              results, 'Unable to find corresponding JSON file for image %s '
              'when doing Skia Gold comparison.' % image_name)
          continue

        # Add 'ignore': '1' if a comparison failure would not be surfaced, as
        # that implies that we aren't actively maintaining baselines for the
        # test. This helps prevent unrelated CLs from getting comments
        # posted to them.
        with open(json_path) as infile:
          # All the key/value pairs in the JSON file are strings, so convert
          # to a bool.
          json_dict = json.load(infile)
          fail_on_unsupported = json_dict.get('fail_on_unsupported_configs',
                                              'false')
          fail_on_unsupported = fail_on_unsupported.lower() == 'true'
        should_hide_failure = (
            device.build_version_sdk not in RENDER_TEST_MODEL_SDK_CONFIGS.get(
                device.product_model, []) and not fail_on_unsupported)
        if should_hide_failure:
          json_dict['ignore'] = '1'
          with open(json_path, 'w') as outfile:
            json.dump(json_dict, outfile)

        gold_session = self._skia_gold_session_manager.GetSkiaGoldSession(
            keys_input=json_path)

        try:
          status, error = gold_session.RunComparison(
              name=render_name,
              png_file=image_path,
              output_manager=self._env.output_manager,
              use_luci=use_luci)
        except Exception as e:  # pylint: disable=broad-except
          _FailTestIfNecessary(results)
          _AppendToLog(results, 'Skia Gold comparison raised exception: %s' % e)
          continue

        if not status:
          continue

        # Don't fail the test if we ran on an unsupported configuration unless
        # the test has explicitly opted in, as it's likely that baselines
        # aren't maintained for that configuration.
        if should_hide_failure:
          if self._test_instance.skia_gold_properties.local_pixel_tests:
            _AppendToLog(
                results, 'Gold comparison for %s failed, but model %s with SDK '
                '%d is not a supported configuration. This failure would be '
                'ignored on the bots, but failing since tests are being run '
                'locally.' % (render_name, device.product_model,
                              device.build_version_sdk))
          else:
            _AppendToLog(
                results, 'Gold comparison for %s failed, but model %s with SDK '
                '%d is not a supported configuration, so ignoring failure.' %
                (render_name, device.product_model, device.build_version_sdk))
            continue

        _FailTestIfNecessary(results)
        failure_log = (
            'Skia Gold reported failure for RenderTest %s. See '
            'RENDER_TESTS.md for how to fix this failure.' % render_name)
        status_codes = gold_utils.AndroidSkiaGoldSession.StatusCodes
        if status == status_codes.AUTH_FAILURE:
          _AppendToLog(results,
                       'Gold authentication failed with output %s' % error)
        elif status == status_codes.INIT_FAILURE:
          _AppendToLog(results,
                       'Gold initialization failed with output %s' % error)
        elif status == status_codes.COMPARISON_FAILURE_REMOTE:
          public_triage_link, internal_triage_link = (
              gold_session.GetTriageLinks(render_name))
          if not public_triage_link:
            _AppendToLog(
                results, 'Failed to get triage link for %s, raw output: %s' %
                (render_name, error))
            _AppendToLog(
                results, 'Reason for no triage link: %s' %
                gold_session.GetTriageLinkOmissionReason(render_name))
            continue
          if gold_properties.IsTryjobRun():
            _SetLinkOnResults(results,
                              'Public Skia Gold triage link for entire CL',
                              public_triage_link)
            _SetLinkOnResults(results,
                              'Internal Skia Gold triage link for entire CL',
                              internal_triage_link)
          else:
            _SetLinkOnResults(
                results, 'Public Skia Gold triage link for %s' % render_name,
                public_triage_link)
            _SetLinkOnResults(
                results, 'Internal Skia Gold triage link for %s' % render_name,
                internal_triage_link)
          _AppendToLog(results, failure_log)

        elif status == status_codes.COMPARISON_FAILURE_LOCAL:
          given_link = gold_session.GetGivenImageLink(render_name)
          closest_link = gold_session.GetClosestImageLink(render_name)
          diff_link = gold_session.GetDiffImageLink(render_name)

          processed_template_output = _GenerateRenderTestHtml(
              render_name, given_link, closest_link, diff_link)
          with self._env.output_manager.ArchivedTempfile(
              '%s.html' % render_name, 'gold_local_diffs',
              output_manager.Datatype.HTML) as html_results:
            html_results.write(processed_template_output)
          _SetLinkOnResults(results, render_name, html_results.Link())
          _AppendToLog(
              results,
              'See %s link for diff image with closest positive.' % render_name)
        elif status == status_codes.LOCAL_DIFF_FAILURE:
          _AppendToLog(results,
                       'Failed to generate diffs from Gold: %s' % error)
        else:
          logging.error(
              'Given unhandled SkiaGoldSession StatusCode %s with error %s',
              status, error)

  #override
  def _ShouldRetry(self, test, result):
    # We've tried to disable retries in the past with mixed results.
    # See crbug.com/619055 for historical context and crbug.com/797002
    # for ongoing efforts.
    del test, result
    return True

  #override
  def _ShouldShard(self):
    return True

  @classmethod
  def _GetTimeoutScaleFromAnnotations(cls, annotations):
    try:
      return int(annotations.get('TimeoutScale', {}).get('value', 1))
    except ValueError as e:
      logging.warning('Non-integer value of TimeoutScale ignored. (%s)', str(e))
      return 1

  @classmethod
  def _GetTimeoutFromAnnotations(cls, annotations, test_name):
    for k, v in TIMEOUT_ANNOTATIONS:
      if k in annotations:
        timeout = v
        break
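    # The for loop's else clause runs only when no annotation matched above.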
    else:
      logging.warning('Using default 1 minute timeout for %s', test_name)
      timeout = 60

    timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)

    return timeout


def _IsWPRRecordReplayTest(test):
  """Determines whether a test or a list of tests is a WPR RecordReplay Test."""
  if not isinstance(test, list):
    test = [test]
  return any([
      WPR_RECORD_REPLAY_TEST_FEATURE_ANNOTATION in t['annotations'].get(
          FEATURE_ANNOTATION, {}).get('value', ()) for t in test
  ])


def _GetWPRArchivePath(test):
  """Retrieves the archive path from the WPRArchiveDirectory annotation."""
  return test['annotations'].get(WPR_ARCHIVE_FILE_PATH_ANNOTATION,
                                 {}).get('value', ())


def _ReplaceUncommonChars(original):
  """Replaces uncommon characters, e.g. '#', with '__'."""
  if not original:
    raise ValueError('parameter should not be empty')

  uncommon_chars = ['#']
  for char in uncommon_chars:
    original = original.replace(char, '__')
  return original


def _IsRenderTest(test):
  """Determines if a test or list of tests has a RenderTest amongst them."""
  if not isinstance(test, list):
    test = [test]
  return any([RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
              FEATURE_ANNOTATION, {}).get('value', ()) for t in test])


def _GenerateRenderTestHtml(image_name, failure_link, golden_link, diff_link):
  """Generates a RenderTest results page.

  Displays the generated (failure) image, the golden image, and the diff
  between them.

  Args:
    image_name: The name of the image whose comparison failed.
    failure_link: The URL to the generated/failure image.
    golden_link: The URL to the golden image.
    diff_link: The URL to the diff image between the failure and golden images.

  Returns:
    A string containing the generated HTML.
  """
  jinja2_env = jinja2.Environment(
      loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR), trim_blocks=True)
  template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME)
  # pylint: disable=no-member
  return template.render(
      test_name=image_name,
      failure_link=failure_link,
      golden_link=golden_link,
      diff_link=diff_link)


def _FailTestIfNecessary(results):
  """Marks the given results as failed if they weren't already.

  Marks the result types as ResultType.FAIL unless they were already some sort
  of failure type, e.g. ResultType.CRASH.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
  """
  for result in results:
    if result.GetType() not in [
        base_test_result.ResultType.FAIL, base_test_result.ResultType.CRASH,
        base_test_result.ResultType.TIMEOUT, base_test_result.ResultType.UNKNOWN
    ]:
      result.SetType(base_test_result.ResultType.FAIL)


def _AppendToLog(results, line):
  """Appends the given line to the end of the logs of the given results.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
    line: A string to be appended as a new line to each result's log.
  """
  for result in results:
    result.SetLog(result.GetLog() + '\n' + line)


def _SetLinkOnResults(results, link_name, link):
  """Sets the given link on the given results.

  Args:
    results: A list of base_test_result.BaseTestResult objects.
    link_name: A string containing the name of the link being set.
    link: A string containing the link being set.
  """
  for result in results:
    result.SetLink(link_name, link)
