import sys
import os
import tempfile

from xml.etree import ElementTree as ET
from nose2.tests._common import FunctionalTestCase


def read_report(path):
    """Parse the junit-xml report at ``path`` and return its root element."""
    with open(path, 'r', encoding='utf-8') as f:
        return ET.parse(f).getroot()
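
# The assertions in this module imply a scenario fixture at
# scenario/subtests/test_subtests.py along the lines of the sketch below.
# This is a hypothetical reconstruction for orientation only, inferred from
# the expected counts and subtest labels asserted in the tests; the actual
# fixture may differ in detail:
#
#     import unittest
#
#     class Case(unittest.TestCase):
#
#         def test_subtest_success(self):
#             for i in range(6):
#                 with self.subTest(i=i):
#                     pass  # every subtest passes
#
#         def test_subtest_failure(self):
#             for i in range(6):
#                 with self.subTest(i=i):
#                     self.assertEqual(i % 2, 0)  # fails for i=1, 3, 5
#
#         def test_subtest_error(self):
#             for i in range(3):
#                 with self.subTest(i=i):
#                     raise RuntimeError(i)  # errors for i=0, 1, 2
#
#         @unittest.expectedFailure
#         def test_subtest_expected_failure(self):
#             with self.subTest():
#                 self.assertEqual(0, 1)
#
#         def test_subtest_message(self):
#             for i in range(6):
#                 with self.subTest('msg', i=i):  # shown as "[msg] (i=...)"
#                     self.assertEqual(i % 2, 0)  # fails for i=1, 3, 5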


class TestSubtests(FunctionalTestCase):

    def setUp(self):
        super(TestSubtests, self).setUp()
        if sys.version_info < (3, 4):
            self.skipTest('Python >= 3.4 required')

    def test_success(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_success'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='test_subtest_success')
        self.assertTestRunOutputMatches(proc, stderr='OK')
        self.assertEqual(proc.poll(), 0)

    def test_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_error(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_error'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=0\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 0')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 1')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=2\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 2')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(errors=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_expected_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_expected_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(
            proc, stderr='test_subtest_expected_failure.*expected failure')
        self.assertTestRunOutputMatches(proc, stderr=r'OK \(expected failures=1\)')
        self.assertEqual(proc.poll(), 0)

    def test_message(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_message'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_all(self):
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 5 tests')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=0\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=2\)')
        self.assertTestRunOutputMatches(
            proc, stderr=r'FAILED \(failures=6, errors=3, expected failures=1\)')
        self.assertEqual(proc.poll(), 1)


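# The same scenarios run through the multiprocessing plugin
# (nose2.plugins.mp).  Subtest results originate in worker processes and are
# shipped back to the main process, so the reported output should match the
# single-process runs above.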
class TestSubtestsMultiprocess(FunctionalTestCase):

    def setUp(self):
        super(TestSubtestsMultiprocess, self).setUp()
        if sys.version_info < (3, 4):
            self.skipTest('Python >= 3.4 required')

    def test_success(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests.Case.test_subtest_success'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='test_subtest_success')
        self.assertTestRunOutputMatches(proc, stderr='OK')
        self.assertEqual(proc.poll(), 0)

    def test_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests.Case.test_subtest_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_error(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests.Case.test_subtest_error'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=0\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 0')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 1')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=2\)')
        self.assertTestRunOutputMatches(proc, stderr='RuntimeError: 2')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(errors=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_expected_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests.Case.test_subtest_expected_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(
            proc, stderr='test_subtest_expected_failure.*expected failure')
        self.assertTestRunOutputMatches(proc, stderr=r'OK \(expected failures=1\)')
        self.assertEqual(proc.poll(), 0)

    def test_message(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests.Case.test_subtest_message'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_all(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.mp',
            '--processes=2',
            '-v',
            'test_subtests'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 5 tests')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_message.*\[msg\] \(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=0\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_error.*\(i=2\)')
        self.assertTestRunOutputMatches(
            proc, stderr=r'FAILED \(failures=6, errors=3, expected failures=1\)')
        self.assertEqual(proc.poll(), 1)


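# The same scenarios again, this time checking the junit-xml report written
# by the junitxml plugin.  The counts asserted below suggest that each
# failing or erroring subtest gets its own <testcase> element, while a test
# whose subtests all pass is recorded as a single <testcase>.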
class TestSubtestsJunitXml(FunctionalTestCase):

    def setUp(self):
        super(TestSubtestsJunitXml, self).setUp()
        if sys.version_info < (3, 4):
            self.skipTest('Python >= 3.4 required')
        # Only a unique path is needed here; the junitxml plugin writes the
        # report itself, so the handle is closed right away.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        self.junit_report = tmp.name

    def tearDown(self):
        os.remove(self.junit_report)

    def test_success(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests.Case.test_subtest_success'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='OK')
        self.assertEqual(proc.poll(), 0)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '1')
        self.assertEqual(tree.get('failures'), '0')
        self.assertEqual(tree.get('errors'), '0')
        self.assertEqual(tree.get('skipped'), '0')
        self.assertEqual(len(tree.findall('testcase')), 1)

        for test_case in tree.findall('testcase'):
            self.assertEqual(test_case.get('name'), 'test_subtest_success')
    def test_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests.Case.test_subtest_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='FAILED')
        self.assertEqual(proc.poll(), 1)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '1')
        self.assertEqual(tree.get('failures'), '3')
        self.assertEqual(tree.get('errors'), '0')
        self.assertEqual(tree.get('skipped'), '0')
        self.assertEqual(len(tree.findall('testcase')), 3)

        for index, test_case in enumerate(tree.findall('testcase')):
            self.assertEqual(test_case.get('name'),
                             'test_subtest_failure (i={})'.format(index * 2 + 1))

    def test_error(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests.Case.test_subtest_error'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='FAILED')
        self.assertEqual(proc.poll(), 1)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '1')
        self.assertEqual(tree.get('failures'), '0')
        self.assertEqual(tree.get('errors'), '3')
        self.assertEqual(tree.get('skipped'), '0')
        self.assertEqual(len(tree.findall('testcase')), 3)

        for index, test_case in enumerate(tree.findall('testcase')):
            self.assertEqual(test_case.get('name'),
                             'test_subtest_error (i={})'.format(index))

    def test_expected_failure(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests.Case.test_subtest_expected_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='OK')
        self.assertEqual(proc.poll(), 0)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '1')
        self.assertEqual(tree.get('failures'), '0')
        self.assertEqual(tree.get('errors'), '0')
        # the expected failure is reported as skipped in the junit report
        self.assertEqual(tree.get('skipped'), '1')
        self.assertEqual(len(tree.findall('testcase')), 1)

        for test_case in tree.findall('testcase'):
            self.assertEqual(test_case.get('name'),
                             'test_subtest_expected_failure')

    def test_message(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests.Case.test_subtest_message'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr='FAILED')
        self.assertEqual(proc.poll(), 1)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '1')
        self.assertEqual(tree.get('failures'), '3')
        self.assertEqual(tree.get('errors'), '0')
        self.assertEqual(tree.get('skipped'), '0')
        self.assertEqual(len(tree.findall('testcase')), 3)

        for index, test_case in enumerate(tree.findall('testcase')):
            self.assertEqual(test_case.get('name'),
                             'test_subtest_message [msg] (i={})'.format(index * 2 + 1))

    def test_all(self):
        proc = self.runIn(
            'scenario/subtests',
            '--plugin=nose2.plugins.junitxml',
            '--junit-xml',
            '--junit-xml-path={}'.format(self.junit_report),
            '-v',
            'test_subtests'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 5 tests')
        self.assertTestRunOutputMatches(proc, stderr='FAILED')
        self.assertEqual(proc.poll(), 1)

        tree = read_report(self.junit_report)
        self.assertEqual(tree.get('tests'), '5')
        self.assertEqual(tree.get('failures'), '6')
        self.assertEqual(tree.get('errors'), '3')
        self.assertEqual(tree.get('skipped'), '1')
        # 11 testcase elements: 1 success + 3 failures + 3 errors
        # + 1 expected failure + 3 message failures
        self.assertEqual(len(tree.findall('testcase')), 11)


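# With --fail-fast the run stops at the first failing subtest, so only the
# i=1 failure is reported; test_failure below establishes the baseline of
# three failures without the flag.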
class TestSubtestsFailFast(FunctionalTestCase):

    def setUp(self):
        super(TestSubtestsFailFast, self).setUp()
        if sys.version_info < (3, 4):
            self.skipTest('Python >= 3.4 required')

    def test_failure(self):
        # Baseline without --fail-fast: all three subtest failures are
        # reported.
        proc = self.runIn(
            'scenario/subtests',
            '-v',
            'test_subtests.Case.test_subtest_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=3\)')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=5\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=3\)')
        self.assertEqual(proc.poll(), 1)

    def test_failfast(self):
        proc = self.runIn(
            'scenario/subtests',
            '--fail-fast',
            '-v',
            'test_subtests.Case.test_subtest_failure'
        )
        self.assertTestRunOutputMatches(proc, stderr='Ran 1 test')
        self.assertTestRunOutputMatches(proc, stderr=r'test_subtest_failure.*\(i=1\)')
        self.assertTestRunOutputMatches(proc, stderr=r'FAILED \(failures=1\)')
        self.assertEqual(proc.poll(), 1)