1# Copyright 2014 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import json
6import os
7import shutil
8import sys
9import tempfile
10import unittest
11import logging
12
13import mock
14
15from py_utils import cloud_storage
16from py_utils.constants import exit_codes
17
18from telemetry import benchmark
19from telemetry.core import exceptions
20from telemetry.core import util
21from telemetry.internal.actions import page_action
22from telemetry.internal.results import page_test_results
23from telemetry.internal.results import results_options
24from telemetry.internal import story_runner
25from telemetry.page import legacy_page_test
26from telemetry import story as story_module
27from telemetry.story import story_filter
28from telemetry.testing import fakes
29from telemetry.testing import options_for_unittests
30from telemetry.testing import test_stories
31from telemetry.web_perf import story_test
32from telemetry.wpr import archive_info
33
34
class RunStorySetTest(unittest.TestCase):
  """Tests that run dummy story sets with a mock StoryTest.

  The main entry point for these tests is story_runner.RunStorySet.
  """
  def setUp(self):
    self.options = options_for_unittests.GetRunOptions(
        output_dir=tempfile.mkdtemp())
    # We use a mock platform and story set, so tests can inspect which methods
    # were called and easily override their behavior.
    self.mock_platform = test_stories.TestSharedState.mock_platform
    self.mock_story_test = mock.Mock(spec=story_test.StoryTest)

  def tearDown(self):
    shutil.rmtree(self.options.output_dir)

  def RunStories(self, stories, **kwargs):
    """Runs a DummyStorySet of the given stories with the mock StoryTest."""
    story_set = test_stories.DummyStorySet(stories)
    with results_options.CreateResults(
        self.options, benchmark_name='benchmark') as results:
      story_runner.RunStorySet(
          self.mock_story_test, story_set, self.options, results, **kwargs)

  def ReadTestResults(self):
    """Reads back the intermediate results written by RunStories."""
    return results_options.ReadTestResults(self.options.intermediate_dir)

  def testRunStorySet(self):
    self.RunStories(['story1', 'story2', 'story3'])
    test_results = self.ReadTestResults()
    # Fixed: this used assertTrue(expected, actual), which always passes for a
    # non-empty first argument (the second is just the failure message).
    self.assertEqual(['PASS', 'PASS', 'PASS'],
                     [test['status'] for test in test_results])

  def testRunStoryWithLongName(self):
    with self.assertRaises(ValueError):
      self.RunStories(['l' * 182])

  def testCallOrderInStoryTest(self):
    """Check the call order of StoryTest methods is as expected."""
    self.RunStories(['foo', 'bar', 'baz'])
    self.assertEqual([call[0] for call in self.mock_story_test.mock_calls],
                     ['WillRunStory', 'Measure', 'DidRunStory'] * 3)

  @mock.patch.object(test_stories.TestSharedState, 'DidRunStory')
  @mock.patch.object(test_stories.TestSharedState, 'RunStory')
  @mock.patch.object(test_stories.TestSharedState, 'WillRunStory')
  def testCallOrderBetweenStoryTestAndSharedState(
      self, will_run_story, run_story, did_run_story):
    """Check the call order between StoryTest and SharedState is correct."""
    root_mock = mock.MagicMock()
    root_mock.attach_mock(self.mock_story_test, 'test')
    root_mock.attach_mock(will_run_story, 'state.WillRunStory')
    root_mock.attach_mock(run_story, 'state.RunStory')
    root_mock.attach_mock(did_run_story, 'state.DidRunStory')

    self.RunStories(['story1'])
    self.assertEqual([call[0] for call in root_mock.mock_calls], [
        'test.WillRunStory',
        'state.WillRunStory',
        'state.RunStory',
        'test.Measure',
        'test.DidRunStory',
        'state.DidRunStory'
    ])

  def testAppCrashExceptionCausesFailure(self):
    self.RunStories([test_stories.DummyStory(
        'story',
        run_side_effect=exceptions.AppCrashException(msg='App Foo crashes'))])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL'],
                     [test['status'] for test in test_results])
    self.assertIn('App Foo crashes', sys.stderr.getvalue())

  @mock.patch.object(test_stories.TestSharedState, 'TearDownState')
  def testExceptionRaisedInSharedStateTearDown(self, tear_down_state):
    class TestOnlyException(Exception):
      pass

    tear_down_state.side_effect = TestOnlyException()
    with self.assertRaises(TestOnlyException):
      self.RunStories(['story'])

  def testUnknownExceptionIsNotFatal(self):
    class UnknownException(Exception):
      pass

    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=UnknownException('FooException')),
        test_stories.DummyStory('bar')])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL', 'PASS'],
                     [test['status'] for test in test_results])
    self.assertIn('FooException', sys.stderr.getvalue())

  def testRaiseBrowserGoneExceptionFromRunPage(self):
    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=exceptions.BrowserGoneException(
                None, 'i am a browser crash message')),
        test_stories.DummyStory('bar')])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL', 'PASS'],
                     [test['status'] for test in test_results])
    self.assertIn('i am a browser crash message', sys.stderr.getvalue())

  @mock.patch.object(test_stories.TestSharedState,
                     'DumpStateUponStoryRunFailure')
  @mock.patch.object(test_stories.TestSharedState, 'TearDownState')
  def testAppCrashThenRaiseInTearDown_Interrupted(
      self, tear_down_state, dump_state_upon_story_run_failure):
    class TearDownStateException(Exception):
      pass

    tear_down_state.side_effect = TearDownStateException()
    root_mock = mock.Mock()
    root_mock.attach_mock(tear_down_state, 'state.TearDownState')
    root_mock.attach_mock(dump_state_upon_story_run_failure,
                          'state.DumpStateUponStoryRunFailure')
    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=exceptions.AppCrashException(msg='crash!')),
        test_stories.DummyStory('bar')])

    self.assertEqual([call[0] for call in root_mock.mock_calls], [
        'state.DumpStateUponStoryRunFailure',
        # This tear down happens because of the app crash.
        'state.TearDownState',
        # This one happens since state must be re-created to check whether
        # later stories should be skipped or unexpectedly skipped. Then
        # state is torn down normally at the end of the runs.
        'state.TearDownState'
    ])

    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 2)
    # First story unexpectedly failed with AppCrashException.
    self.assertEqual(test_results[0]['status'], 'FAIL')
    self.assertFalse(test_results[0]['expected'])
    # Second story unexpectedly skipped due to exception during tear down.
    self.assertEqual(test_results[1]['status'], 'SKIP')
    self.assertFalse(test_results[1]['expected'])

  def testPagesetRepeat(self):
    self.options.pageset_repeat = 2
    self.RunStories(['story1', 'story2'])
    test_results = self.ReadTestResults()
    self.assertEqual(['benchmark/story1', 'benchmark/story2'] * 2,
                     [test['testPath'] for test in test_results])
    self.assertEqual(['PASS', 'PASS', 'PASS', 'PASS'],
                     [test['status'] for test in test_results])

  def _testMaxFailuresOptionIsRespectedAndOverridable(
      self, num_failing_stories, runner_max_failures, options_max_failures,
      expected_num_failures, expected_num_skips):
    """Runs a set of always-failing stories and checks FAIL/SKIP counts.

    The options value, when given, takes precedence over the value passed
    directly to the story runner.
    """
    if options_max_failures:
      self.options.max_failures = options_max_failures
    self.RunStories([
        test_stories.DummyStory(
            'failing_%d' % i, run_side_effect=Exception('boom!'))
        for i in range(num_failing_stories)
    ], max_failures=runner_max_failures)
    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results),
                     expected_num_failures + expected_num_skips)
    for i, test in enumerate(test_results):
      expected_status = 'FAIL' if i < expected_num_failures else 'SKIP'
      self.assertEqual(test['status'], expected_status)

  def testMaxFailuresNotSpecified(self):
    self._testMaxFailuresOptionIsRespectedAndOverridable(
        num_failing_stories=5, runner_max_failures=None,
        options_max_failures=None, expected_num_failures=5,
        expected_num_skips=0)

  def testMaxFailuresSpecifiedToRun(self):
    # Runs up to max_failures+1 failing tests before stopping, since
    # every tests after max_failures failures have been encountered
    # may all be passing.
    self._testMaxFailuresOptionIsRespectedAndOverridable(
        num_failing_stories=5, runner_max_failures=3,
        options_max_failures=None, expected_num_failures=4,
        expected_num_skips=1)

  def testMaxFailuresOption(self):
    # Runs up to max_failures+1 failing tests before stopping, since
    # every tests after max_failures failures have been encountered
    # may all be passing.
    self._testMaxFailuresOptionIsRespectedAndOverridable(
        num_failing_stories=5, runner_max_failures=3,
        options_max_failures=1, expected_num_failures=2,
        expected_num_skips=3)
227
228
class UpdateAndCheckArchivesTest(unittest.TestCase):
  """Tests for the private _UpdateAndCheckArchives."""

  def setUp(self):
    # Avoid hitting cloud storage for real during these tests.
    mock.patch.object(archive_info.WprArchiveInfo,
                      'DownloadArchivesIfNeeded').start()
    self._mock_story_filter = mock.Mock()
    self._mock_story_filter.ShouldSkip.return_value = False

  def tearDown(self):
    mock.patch.stopall()

  def _UpdateAndCheckArchives(self, story_set):
    """Invokes the function under test for the given story set."""
    return story_runner._UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info,
        story_set.stories, self._mock_story_filter)

  def testMissingArchiveDataFile(self):
    story_set = test_stories.DummyStorySet(['story'])
    with self.assertRaises(story_runner.ArchiveError):
      self._UpdateAndCheckArchives(story_set)

  def testMissingArchiveDataFileWithSkippedStory(self):
    self._mock_story_filter.ShouldSkip.return_value = True
    story_set = test_stories.DummyStorySet(['story'])
    self.assertTrue(self._UpdateAndCheckArchives(story_set))

  def testArchiveDataFileDoesNotExist(self):
    story_set = test_stories.DummyStorySet(
        ['story'], archive_data_file='does_not_exist.json')
    with self.assertRaises(story_runner.ArchiveError):
      self._UpdateAndCheckArchives(story_set)

  def testUpdateAndCheckArchivesSuccess(self):
    # This test file has a recording for a 'http://www.testurl.com' story only.
    archive_data_file = os.path.join(
        util.GetUnittestDataDir(), 'archive_files', 'test.json')
    story_set = test_stories.DummyStorySet(
        ['http://www.testurl.com'], archive_data_file=archive_data_file)
    self.assertTrue(self._UpdateAndCheckArchives(story_set))

  def testArchiveWithMissingStory(self):
    # This test file has a recording for a 'http://www.testurl.com' story only.
    archive_data_file = os.path.join(
        util.GetUnittestDataDir(), 'archive_files', 'test.json')
    story_set = test_stories.DummyStorySet(
        ['http://www.testurl.com', 'http://www.google.com'],
        archive_data_file=archive_data_file)
    with self.assertRaises(story_runner.ArchiveError):
      self._UpdateAndCheckArchives(story_set)

  def testArchiveWithMissingWprFile(self):
    # This test file claims to have recordings for both
    # 'http://www.testurl.com' and 'http://www.google.com'; but the file with
    # the wpr recording for the later story is actually missing.
    archive_data_file = os.path.join(
        util.GetUnittestDataDir(), 'archive_files',
        'test_missing_wpr_file.json')
    story_set = test_stories.DummyStorySet(
        ['http://www.testurl.com', 'http://www.google.com'],
        archive_data_file=archive_data_file)
    with self.assertRaises(story_runner.ArchiveError):
      self._UpdateAndCheckArchives(story_set)
301
302
class RunStoryAndProcessErrorIfNeededTest(unittest.TestCase):
  """Tests for the private _RunStoryAndProcessErrorIfNeeded.

  All these tests:
  - Use mocks for all objects, including stories. No real browser is involved.
  - Call story_runner._RunStoryAndProcessErrorIfNeeded as entry point.
  """
  def _CreateErrorProcessingMock(self, method_exceptions=None,
                                 legacy_test=False):
    """Builds a root mock with story/results/test/state child mocks attached.

    Args:
      method_exceptions: Optional mapping from a dotted method path (e.g.
        'state.WillRunStory') to an exception that method should raise.
      legacy_test: If True, spec the test mock on LegacyPageTest instead of
        StoryTest.
    """
    if legacy_test:
      test_class = legacy_page_test.LegacyPageTest
    else:
      test_class = story_test.StoryTest

    root_mock = mock.NonCallableMock(
        story=mock.NonCallableMagicMock(story_module.Story),
        results=mock.NonCallableMagicMock(page_test_results.PageTestResults),
        test=mock.NonCallableMagicMock(test_class),
        state=mock.NonCallableMagicMock(
            story_module.SharedState,
            CanRunStory=mock.Mock(return_value=True)))

    if method_exceptions:
      # Fixed: dict.iteritems() is Python 2 only; items() works on both
      # Python 2 and 3.
      root_mock.configure_mock(**{
          path + '.side_effect': exception
          for path, exception in method_exceptions.items()})

    return root_mock

  def testRunStoryAndProcessErrorIfNeeded_success(self):
    root_mock = self._CreateErrorProcessingMock()

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.test.Measure(root_mock.state.platform, root_mock.results),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_successLegacy(self):
    root_mock = self._CreateErrorProcessingMock(legacy_test=True)

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.test.DidRunPage(root_mock.state.platform),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryTimeout(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.WillRunStory': exceptions.TimeoutException('foo')
    })

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryAppCrash(self):
    # AppCrashException with a recent minidump path attached is re-raised
    # after error processing; use a real (temporary) file for the minidump.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    temp_file_path = tmp.name
    fake_app = fakes.FakeApp()
    fake_app.recent_minidump_path = temp_file_path
    try:
      app_crash_exception = exceptions.AppCrashException(fake_app, msg='foo')
      root_mock = self._CreateErrorProcessingMock(method_exceptions={
          'state.WillRunStory': app_crash_exception
      })

      with self.assertRaises(exceptions.AppCrashException):
        story_runner._RunStoryAndProcessErrorIfNeeded(
            root_mock.story, root_mock.results, root_mock.state, root_mock.test)

      self.assertListEqual(root_mock.method_calls, [
          mock.call.results.CreateArtifact('logs.txt'),
          mock.call.test.WillRunStory(root_mock.state.platform),
          mock.call.state.WillRunStory(root_mock.story),
          mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
          mock.call.results.Fail(
              'Exception raised running %s' % root_mock.story.name),
          mock.call.test.DidRunStory(
              root_mock.state.platform, root_mock.results),
          mock.call.state.DidRunStory(root_mock.results),
      ])
    finally:
      os.remove(temp_file_path)

  def testRunStoryAndProcessErrorIfNeeded_tryError(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.CanRunStory': exceptions.Error('foo')
    })

    with self.assertRaisesRegexp(exceptions.Error, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryUnsupportedAction(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.RunStory': page_action.PageActionNotSupported('foo')
    })

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)
    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.results.Skip('Unsupported page action: foo'),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryUnhandlable(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'test.WillRunStory': Exception('foo')
    })

    with self.assertRaisesRegexp(Exception, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_finallyException(self):
    exc = Exception('bar')
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.DidRunStory': exc,
    })

    with self.assertRaisesRegexp(Exception, 'bar'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.test.Measure(root_mock.state.platform, root_mock.results),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryTimeout_finallyException(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.RunStory': exceptions.TimeoutException('foo'),
        'state.DidRunStory': Exception('bar')
    })

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryError_finallyException(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.WillRunStory': exceptions.Error('foo'),
        'test.DidRunStory': Exception('bar')
    })

    with self.assertRaisesRegexp(exceptions.Error, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryUnsupportedAction_finallyException(
      self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'test.WillRunStory': page_action.PageActionNotSupported('foo'),
        'state.DidRunStory': Exception('bar')
    })

    story_runner._RunStoryAndProcessErrorIfNeeded(
        root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.results.Skip('Unsupported page action: foo'),
        mock.call.test.DidRunStory(
            root_mock.state.platform, root_mock.results),
        mock.call.state.DidRunStory(root_mock.results),
    ])

  def testRunStoryAndProcessErrorIfNeeded_tryUnhandlable_finallyException(self):
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'test.Measure': Exception('foo'),
        'test.DidRunStory': Exception('bar')
    })

    with self.assertRaisesRegexp(Exception, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    self.assertEqual(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.CanRunStory(root_mock.story),
        mock.call.state.RunStory(root_mock.results),
        mock.call.test.Measure(root_mock.state.platform, root_mock.results),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
    ])
576
577
class FakeBenchmark(benchmark.Benchmark):
  test = test_stories.DummyStoryTest
  NAME = 'fake_benchmark'

  def __init__(self, stories=None, **kwargs):
    """A customizable fake_benchmark.

    Args:
      stories: Optional sequence of either story names or objects. Instances
        of DummyStory are useful here. If omitted the benchmark will contain
        a single DummyStory.
      other kwargs are passed to the test_stories.DummyStorySet constructor.
    """
    super(FakeBenchmark, self).__init__()
    if stories is None:
      stories = ['story']
    self._story_set = test_stories.DummyStorySet(stories, **kwargs)

  @classmethod
  def Name(cls):
    return cls.NAME

  def CreateStorySet(self, _):
    return self._story_set
601
602
class FakeStoryFilter(object):
  """Test double for a story filter, driven by explicit story name lists."""

  def __init__(self, stories_to_filter_out=None, stories_to_skip=None):
    self._stories_to_filter = stories_to_filter_out or []
    self._stories_to_skip = stories_to_skip or []
    assert isinstance(self._stories_to_filter, list)
    assert isinstance(self._stories_to_skip, list)

  def FilterStories(self, story_set):
    """Returns the stories whose names are not in the filter-out list."""
    kept = []
    for story in story_set:
      if story.name not in self._stories_to_filter:
        kept.append(story)
    return kept

  def ShouldSkip(self, story):
    """Returns a non-empty skip reason iff the story is marked for skipping."""
    if story.name in self._stories_to_skip:
      return 'fake_reason'
    return ''
616
617
def ReadDiagnostics(test_result):
  """Loads the diagnostics dict stored as an output artifact of a result."""
  artifacts = test_result['outputArtifacts']
  diagnostics_path = artifacts[page_test_results.DIAGNOSTICS_NAME]['filePath']
  with open(diagnostics_path) as f:
    contents = json.load(f)
  return contents['diagnostics']
622
623
624class RunBenchmarkTest(unittest.TestCase):
625  """Tests that run fake benchmarks, no real browser is involved.
626
627  All these tests:
628  - Use a FakeBenchmark instance.
629  - Call GetFakeBrowserOptions to get options for a fake browser.
630  - Call story_runner.RunBenchmark as entry point.
631  """
  def setUp(self):
    # Each test writes into a fresh temp directory, removed in tearDown.
    self.output_dir = tempfile.mkdtemp()
634
  def tearDown(self):
    # Clean up the per-test output directory created in setUp.
    shutil.rmtree(self.output_dir)
637
638  def GetFakeBrowserOptions(self, overrides=None):
639    return options_for_unittests.GetRunOptions(
640        output_dir=self.output_dir,
641        fake_browser=True, overrides=overrides)
642
643  def ReadTestResults(self):
644    return results_options.ReadTestResults(
645        os.path.join(self.output_dir, 'artifacts'))
646
647  def testDisabledBenchmarkViaCanRunOnPlatform(self):
648    fake_benchmark = FakeBenchmark()
649    fake_benchmark.SUPPORTED_PLATFORMS = []
650    options = self.GetFakeBrowserOptions()
651    story_runner.RunBenchmark(fake_benchmark, options)
652    test_results = self.ReadTestResults()
653    self.assertFalse(test_results)  # No tests ran at all.
654
655  def testSkippedWithStoryFilter(self):
656    fake_benchmark = FakeBenchmark(stories=['fake_story'])
657    options = self.GetFakeBrowserOptions()
658    fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
659    with mock.patch(
660        'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
661        return_value=fake_story_filter):
662      story_runner.RunBenchmark(fake_benchmark, options)
663    test_results = self.ReadTestResults()
664    self.assertTrue(test_results)  # Some tests ran, but all skipped.
665    self.assertTrue(all(t['status'] == 'SKIP' for t in test_results))
666
667  def testOneStorySkippedOneNot(self):
668    fake_story_filter = FakeStoryFilter(stories_to_skip=['story1'])
669    fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
670    options = self.GetFakeBrowserOptions()
671    with mock.patch(
672        'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
673        return_value=fake_story_filter):
674      story_runner.RunBenchmark(fake_benchmark, options)
675    test_results = self.ReadTestResults()
676    status = [t['status'] for t in test_results]
677    self.assertEqual(len(status), 2)
678    self.assertIn('SKIP', status)
679    self.assertIn('PASS', status)
680
681  def testOneStoryFilteredOneNot(self):
682    fake_story_filter = FakeStoryFilter(stories_to_filter_out=['story1'])
683    fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
684    options = self.GetFakeBrowserOptions()
685    with mock.patch(
686        'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
687        return_value=fake_story_filter):
688      story_runner.RunBenchmark(fake_benchmark, options)
689    test_results = self.ReadTestResults()
690    self.assertEqual(len(test_results), 1)
691    self.assertEqual(test_results[0]['status'], 'PASS')
692    self.assertTrue(test_results[0]['testPath'].endswith('/story2'))
693
694  def testValidateBenchmarkName(self):
695    class FakeBenchmarkWithBadName(FakeBenchmark):
696      NAME = 'bad/benchmark (name)'
697
698    fake_benchmark = FakeBenchmarkWithBadName()
699    options = self.GetFakeBrowserOptions()
700    return_code = story_runner.RunBenchmark(fake_benchmark, options)
701    self.assertEqual(return_code, 2)
702    self.assertIn('Invalid benchmark name', sys.stderr.getvalue())
703
704  def testWithOwnerInfo(self):
705
706    @benchmark.Owner(emails=['alice@chromium.org', 'bob@chromium.org'],
707                     component='fooBar',
708                     documentation_url='https://example.com/')
709    class FakeBenchmarkWithOwner(FakeBenchmark):
710      pass
711
712    fake_benchmark = FakeBenchmarkWithOwner()
713    options = self.GetFakeBrowserOptions()
714    story_runner.RunBenchmark(fake_benchmark, options)
715    test_results = self.ReadTestResults()
716    diagnostics = ReadDiagnostics(test_results[0])
717    self.assertEqual(diagnostics['owners'],
718                     ['alice@chromium.org', 'bob@chromium.org'])
719    self.assertEqual(diagnostics['bugComponents'], ['fooBar'])
720    self.assertEqual(diagnostics['documentationLinks'],
721                     [['Benchmark documentation link', 'https://example.com/']])
722
723  def testWithOwnerInfoButNoUrl(self):
724
725    @benchmark.Owner(emails=['alice@chromium.org'])
726    class FakeBenchmarkWithOwner(FakeBenchmark):
727      pass
728
729    fake_benchmark = FakeBenchmarkWithOwner()
730    options = self.GetFakeBrowserOptions()
731    story_runner.RunBenchmark(fake_benchmark, options)
732    test_results = self.ReadTestResults()
733    diagnostics = ReadDiagnostics(test_results[0])
734    self.assertEqual(diagnostics['owners'], ['alice@chromium.org'])
735    self.assertNotIn('documentationLinks', diagnostics)
736
737  def testDeviceInfo(self):
738    fake_benchmark = FakeBenchmark(stories=['fake_story'])
739    options = self.GetFakeBrowserOptions()
740    options.fake_possible_browser = fakes.FakePossibleBrowser(
741        arch_name='abc', os_name='win', os_version_name='win10')
742    story_runner.RunBenchmark(fake_benchmark, options)
743    test_results = self.ReadTestResults()
744    diagnostics = ReadDiagnostics(test_results[0])
745    self.assertEqual(diagnostics['architectures'], ['abc'])
746    self.assertEqual(diagnostics['osNames'], ['win'])
747    self.assertEqual(diagnostics['osVersions'], ['win10'])
748
749  def testReturnCodeDisabledStory(self):
750    fake_benchmark = FakeBenchmark(stories=['fake_story'])
751    fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
752    options = self.GetFakeBrowserOptions()
753    with mock.patch(
754        'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
755        return_value=fake_story_filter):
756      return_code = story_runner.RunBenchmark(fake_benchmark, options)
757    self.assertEqual(return_code, exit_codes.ALL_TESTS_SKIPPED)
758
759  def testReturnCodeSuccessfulRun(self):
760    fake_benchmark = FakeBenchmark()
761    options = self.GetFakeBrowserOptions()
762    return_code = story_runner.RunBenchmark(fake_benchmark, options)
763    self.assertEqual(return_code, exit_codes.SUCCESS)
764
765  def testReturnCodeCaughtException(self):
766    fake_benchmark = FakeBenchmark(stories=[
767        test_stories.DummyStory(
768            'story', run_side_effect=exceptions.AppCrashException())])
769    options = self.GetFakeBrowserOptions()
770    return_code = story_runner.RunBenchmark(fake_benchmark, options)
771    self.assertEqual(return_code, exit_codes.TEST_FAILURE)
772
773  def testReturnCodeUnhandleableError(self):
774    fake_benchmark = FakeBenchmark(stories=[
775        test_stories.DummyStory(
776            'story', run_side_effect=MemoryError('Unhandleable'))])
777    options = self.GetFakeBrowserOptions()
778    return_code = story_runner.RunBenchmark(fake_benchmark, options)
779    self.assertEqual(return_code, exit_codes.FATAL_ERROR)
780
781  def testRunStoryWithMissingArchiveFile(self):
782    fake_benchmark = FakeBenchmark(archive_data_file='data/does-not-exist.json')
783    options = self.GetFakeBrowserOptions()
784    return_code = story_runner.RunBenchmark(fake_benchmark, options)
785    self.assertEqual(return_code, 2)  # Benchmark was interrupted.
786    self.assertIn('ArchiveError', sys.stderr.getvalue())
787
788  def testDownloadMinimalServingDirs(self):
789    fake_benchmark = FakeBenchmark(stories=[
790        test_stories.DummyStory(
791            'story_foo', serving_dir='/files/foo', tags=['foo']),
792        test_stories.DummyStory(
793            'story_bar', serving_dir='/files/bar', tags=['bar']),
794    ], cloud_bucket=cloud_storage.PUBLIC_BUCKET)
795    options = self.GetFakeBrowserOptions(overrides={'story_tag_filter': 'foo'})
796    with mock.patch(
797        'py_utils.cloud_storage.GetFilesInDirectoryIfChanged') as get_files:
798      story_runner.RunBenchmark(fake_benchmark, options)
799
800    # Foo is the only included story serving dir.
801    self.assertEqual(get_files.call_count, 1)
802    get_files.assert_called_once_with('/files/foo', cloud_storage.PUBLIC_BUCKET)
803
804  def testAbridged(self):
805    options = self.GetFakeBrowserOptions()
806    options.run_abridged_story_set = True
807    story_filter.StoryFilterFactory.ProcessCommandLineArgs(
808        parser=None, args=options)
809    fake_benchmark = FakeBenchmark(stories=[
810        test_stories.DummyStory('story1', tags=['important']),
811        test_stories.DummyStory('story2', tags=['other']),
812    ], abridging_tag='important')
813    story_runner.RunBenchmark(fake_benchmark, options)
814    test_results = self.ReadTestResults()
815    self.assertEqual(len(test_results), 1)
816    self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
817
818  def testFullRun(self):
819    options = self.GetFakeBrowserOptions()
820    story_filter.StoryFilterFactory.ProcessCommandLineArgs(
821        parser=None, args=options)
822    fake_benchmark = FakeBenchmark(stories=[
823        test_stories.DummyStory('story1', tags=['important']),
824        test_stories.DummyStory('story2', tags=['other']),
825    ], abridging_tag='important')
826    story_runner.RunBenchmark(fake_benchmark, options)
827    test_results = self.ReadTestResults()
828    self.assertEqual(len(test_results), 2)
829
830  def testStoryFlag(self):
831    options = self.GetFakeBrowserOptions()
832    args = fakes.FakeParsedArgsForStoryFilter(stories=['story1', 'story3'])
833    story_filter.StoryFilterFactory.ProcessCommandLineArgs(
834        parser=None, args=args)
835    fake_benchmark = FakeBenchmark(stories=['story1', 'story2', 'story3'])
836    story_runner.RunBenchmark(fake_benchmark, options)
837    test_results = self.ReadTestResults()
838    self.assertEqual(len(test_results), 2)
839    self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
840    self.assertTrue(test_results[1]['testPath'].endswith('/story3'))
841
842  def testArtifactLogsContainHandleableException(self):
843    def failed_run():
844      logging.warning('This will fail gracefully')
845      raise exceptions.TimeoutException('karma!')
846
847    fake_benchmark = FakeBenchmark(stories=[
848        test_stories.DummyStory('story1', run_side_effect=failed_run),
849        test_stories.DummyStory('story2')
850    ])
851
852    options = self.GetFakeBrowserOptions()
853    return_code = story_runner.RunBenchmark(fake_benchmark, options)
854    self.assertEqual(return_code, exit_codes.TEST_FAILURE)
855    test_results = self.ReadTestResults()
856    self.assertEqual(len(test_results), 2)
857
858    # First story failed.
859    self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
860    self.assertEqual(test_results[0]['status'], 'FAIL')
861    self.assertIn('logs.txt', test_results[0]['outputArtifacts'])
862
863    with open(test_results[0]['outputArtifacts']['logs.txt']['filePath']) as f:
864      test_log = f.read()
865
866    # Ensure that the log contains warning messages and python stack.
867    self.assertIn('Handleable error', test_log)
868    self.assertIn('This will fail gracefully', test_log)
869    self.assertIn("raise exceptions.TimeoutException('karma!')", test_log)
870
871    # Second story ran fine.
872    self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
873    self.assertEqual(test_results[1]['status'], 'PASS')
874
875  def testArtifactLogsContainUnhandleableException(self):
876    def failed_run():
877      logging.warning('This will fail badly')
878      raise MemoryError('this is a fatal exception')
879
880    fake_benchmark = FakeBenchmark(stories=[
881        test_stories.DummyStory('story1', run_side_effect=failed_run),
882        test_stories.DummyStory('story2')
883    ])
884
885    options = self.GetFakeBrowserOptions()
886    return_code = story_runner.RunBenchmark(fake_benchmark, options)
887    self.assertEqual(return_code, exit_codes.FATAL_ERROR)
888    test_results = self.ReadTestResults()
889    self.assertEqual(len(test_results), 2)
890
891    # First story failed.
892    self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
893    self.assertEqual(test_results[0]['status'], 'FAIL')
894    self.assertIn('logs.txt', test_results[0]['outputArtifacts'])
895
896    with open(test_results[0]['outputArtifacts']['logs.txt']['filePath']) as f:
897      test_log = f.read()
898
899    # Ensure that the log contains warning messages and python stack.
900    self.assertIn('Unhandleable error', test_log)
901    self.assertIn('This will fail badly', test_log)
902    self.assertIn("raise MemoryError('this is a fatal exception')", test_log)
903
904    # Second story was skipped.
905    self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
906    self.assertEqual(test_results[1]['status'], 'SKIP')
907
908  def testUnexpectedSkipsWithFiltering(self):
909    # We prepare side effects for 50 stories, the first 30 run fine, the
910    # remaining 20 fail with a fatal error.
911    fatal_error = MemoryError('this is an unexpected exception')
912    side_effects = [None] * 30 + [fatal_error] * 20
913
914    fake_benchmark = FakeBenchmark(stories=(
915        test_stories.DummyStory('story_%i' % i, run_side_effect=effect)
916        for i, effect in enumerate(side_effects)))
917
918    # Set the filtering to only run from story_10 --> story_40
919    options = self.GetFakeBrowserOptions({
920        'story_shard_begin_index': 10,
921        'story_shard_end_index': 41})
922    return_code = story_runner.RunBenchmark(fake_benchmark, options)
923    self.assertEquals(exit_codes.FATAL_ERROR, return_code)
924
925    # The results should contain entries of story 10 --> story 40. Of those
926    # entries, story 31's actual result is 'FAIL' and
927    # stories from 31 to 40 will shows 'SKIP'.
928    test_results = self.ReadTestResults()
929    self.assertEqual(len(test_results), 31)
930
931    expected = []
932    expected.extend(('story_%i' % i, 'PASS') for i in xrange(10, 30))
933    expected.append(('story_30', 'FAIL'))
934    expected.extend(('story_%i' % i, 'SKIP') for i in xrange(31, 41))
935
936    for (story, status), result in zip(expected, test_results):
937      self.assertEqual(result['testPath'], 'fake_benchmark/%s' % story)
938      self.assertEqual(result['status'], status)
939