import json
import platform

import pytest

pytest_plugins = 'pytester',
platform
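# `pytest_plugins = 'pytester',` pulls in pytest's pytester plugin, which provides the
# `testdir` fixture used throughout this module to run pytest-benchmark in a subprocess
# (`runpytest_subprocess`) and fnmatch its output. The bare `platform` expression above
# presumably just keeps linters from flagging the import as unused: `platform` is only
# referenced inside a string-based `skipif` condition further down.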


def test_help(testdir):
    result = testdir.runpytest_subprocess('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        *Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        *Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        *Default: 5",
        "  --benchmark-timer=FUNC",
        "  --benchmark-calibration-precision=NUM",
        "                        *Default: 10",
        "  --benchmark-warmup=[KIND]",
        "  --benchmark-warmup-iterations=NUM",
        "                        *Default: 100000",
        "  --benchmark-disable-gc",
        "  --benchmark-skip      *",
        "  --benchmark-only      *",
        "  --benchmark-save=NAME",
        "  --benchmark-autosave  *",
        "  --benchmark-save-data",
        "  --benchmark-json=PATH",
        "  --benchmark-compare=[NUM|_ID]",
        "  --benchmark-compare-fail=EXPR?[[]EXPR?...[]]",
        "  --benchmark-cprofile=COLUMN",
        "  --benchmark-storage=URI",
        "                        *Default: 'file://./.benchmarks'.",
        "  --benchmark-verbose   *",
        "  --benchmark-sort=COL  *",
        "  --benchmark-group-by=LABEL",
        "                        *Default: 'group'",
        "  --benchmark-columns=LABELS",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED*",
        "test_groups.py::test_fast PASSED*",
        "test_groups.py::test_slow PASSED*",
        "test_groups.py::test_slower PASSED*",
        "test_groups.py::test_xfast PASSED*",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed * ======*",
    ])


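# SIMPLE_TEST is the module most option/CLI tests below run against: a module doctest
# plus two benchmarked tests (a ~1µs sleep and a 0.1s sleep), so both the benchmark
# table and the doctest collection paths get exercised.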
SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

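# FIXTURES_ALSO_SKIPPED_TEST has a fixture that prints a marker string; the test using
# it asserts that the marker never shows up under --benchmark-only, i.e. fixtures of
# skipped (non-benchmark) tests are not set up at all.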
FIXTURES_ALSO_SKIPPED_TEST = '''
import pytest

@pytest.fixture
def dep():
    print("""
dep created""")

def test_normal(dep):
    pass

def test_bench(benchmark):
    benchmark.pedantic(lambda: None)
'''

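# GROUPING_TEST and GROUPING_PARAMS_TEST are parametrized benchmark modules used to
# exercise the --benchmark-group-by modes tested below (name, func, fullfunc, param,
# param:NAME and fullname).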
GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


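# Note on the expected-output patterns below: fnmatch treats "[...]" as a character
# class, so literal brackets in test ids are spelled "[[]" and "[]]" (e.g. the pattern
# "test_a[[]0[]]" matches the output "test_a[0]").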
def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x,
                                          test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x,
                                          test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed * ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x,
                                          test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from M*',
        '============* 8 passed * ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x,
                                          test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed * ============*',
    ])


def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001',
                                          '--benchmark-group-by', 'param:foo',
                                          '--benchmark-sort', 'fullname',
                                          test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed * ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001',
                                          '--benchmark-group-by', 'param:foo,param:bar',
                                          '--benchmark-sort', 'fullname',
                                          test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed * ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x,
                                          test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed * ============*',
    ])


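# The benchmark fixture allows only one measurement per test: once benchmark(...) has
# run, a later benchmark.pedantic(...) must raise FixtureAlreadyUsed, and vice versa.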
def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest_subprocess(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_only_override_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-only', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_only_override_skip.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed * ======*",
    ])


def test_fixtures_also_skipped(testdir):
    test = testdir.makepyfile(FIXTURES_ALSO_SKIPPED_TEST)
    result = testdir.runpytest_subprocess('--benchmark-only', '-s', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "*====== 1 passed, 1 skipped in * ======*",
    ])
    assert 'dep created' not in result.stdout.lines


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-max-time=0.000001',
                                          '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed * ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed * ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          '--benchmark-compare=0001',
                                          '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          '--benchmark-compare=0001',
                                          '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                                          '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                                          '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          '--benchmark-compare=0002', '-rw',
                                          test)
    result.stderr.fnmatch_lines([
        "* PytestBenchmarkWarning: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          '--benchmark-compare=0002',
                                          test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                                          test, '--benchmark-compare')
    result.stderr.fnmatch_lines([
        "* PytestBenchmarkWarning: Can't compare. No benchmark files in '*'. Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                                          test, '--benchmark-compare=1')
    result.stderr.fnmatch_lines([
        "* PytestBenchmarkWarning: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules',
                                          test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                                          '-vv', test)
    result.stderr.fnmatch_lines([
        "  Calibrating to target round *s; will estimate when reaching *s (using: *, precision: *).",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-save=foobar',
                                          '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_save_extra_info(testdir):
    test = testdir.makepyfile("""
    def test_extra(benchmark):
        benchmark.extra_info['foo'] = 'bar'
        benchmark(lambda: None)
    """)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-save=foobar',
                                          '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert bench_info['name'] == 'test_extra'
    assert bench_info['extra_info'] == {'foo': 'bar'}


def test_update_machine_info_hook_detection(testdir):
    """Tests detection and execution of update_machine_info hooks.

    Verifies that machine info hooks are detected and executed in nested
    `conftest.py`s.
    """

    record_path_conftest = '''
import os

def pytest_benchmark_update_machine_info(config, machine_info):
    machine_info["conftest_path"] = (
        machine_info.get("conftest_path", []) + [os.path.relpath(__file__)]
    )
    '''

    simple_test = '''
def test_simple(benchmark):
    @benchmark
    def result():
        1+1
    '''

    testdir.makepyfile(**{
        "conftest": record_path_conftest,
        "test_module/conftest": record_path_conftest,
        "test_module/tests/conftest": record_path_conftest,
        "test_module/tests/simple_test.py": simple_test,
    })

    def run_verify_pytest(*args):
        testdir.runpytest_subprocess(
            '--benchmark-json=benchmark.json',
            '--benchmark-max-time=0.0000001',
            *args
        )

        benchmark_json = json.loads(testdir.tmpdir.join('benchmark.json').read())
        machine_info = benchmark_json["machine_info"]

        assert sorted(
            i.replace('\\', '/') for i in machine_info["conftest_path"]
        ) == sorted([
            "conftest.py",
            "test_module/conftest.py",
            "test_module/tests/conftest.py",
        ])

    run_verify_pytest("test_module/tests")
    run_verify_pytest("test_module")
    run_verify_pytest(".")


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-histogram=foobar',
                                          '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-autosave',
                                          '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed * ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed * ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed * ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "*usage: *",
        "*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one "
        "of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '-n', '1', '-rw', test)
    result.stderr.fnmatch_lines([
        "* Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


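# Exact cProfile timings vary from run to run, so test_cprofile only matches the section
# header and the column row; representative data rows are kept commented out below for
# reference.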
def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "------------*----------- cProfile (time in s) ------------*-----------",
        "test_cprofile.py::test_fast",
        "ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)",
        # "1\t0.0000\t0.0000\t0.0001\t0.0001\ttest_cprofile0/test_cprofile.py:9(result)",
        # "1\t0.0001\t0.0001\t0.0001\t0.0001\t~:0(<built-in method time.sleep>)",
        # "1\t0.0000\t0.0000\t0.0000\t0.0000\t~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)",
        # "1\t0.0000\t0.0000\t0.1002\t0.1002\ttest_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1\t0.1002\t0.1002\t0.1002\t0.1002\t~:0(<built-in method time.sleep>)",
        # "1\t0.0000\t0.0000\t0.0000\t0.0000\t~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_disabled_and_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--benchmark-disable', '--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "*==== 2 passed*",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest_subprocess('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_abort_broken.py::test_bad FAILED*",
        "test_abort_broken.py::test_bad2 FAILED*",
        "test_abort_broken.py::test_ok*a* ERROR*",
        "test_abort_broken.py::test_ok*b* ERROR*",
        "test_abort_broken.py::test_ok*c* ERROR*",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function *test_ok*>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function *test_ok*>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function *test_ok*>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        "?       @benchmark",
        "?       def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
    ])

    result.stdout.fnmatch_lines([
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* ======*",
    ])


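# BASIC_TEST defines test_fast twice; the second definition shadows the first, which is
# why the tests below collect only 5 items (the module doctest plus 4 benchmark tests).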
BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED*",
        "test_basic.py::test_slow PASSED*",
        "test_basic.py::test_slower PASSED*",
        "test_basic.py::test_xfast PASSED*",
        "test_basic.py::test_fast PASSED*",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed * ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED*",
        "test_skip.py::test_slow SKIPPED*",
        "test_skip.py::test_slower SKIPPED*",
        "test_skip.py::test_xfast SKIPPED*",
        "test_skip.py::test_fast SKIPPED*",
        "*====== 1 passed, 4 skipped * ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED*",
        "test_disable.py::test_slow PASSED*",
        "test_disable.py::test_slower PASSED*",
        "test_disable.py::test_xfast PASSED*",
        "test_disable.py::test_fast PASSED*",
        "*====== 5 passed * ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items*",
        "test_mark_selection.py::test_xfast PASSED*",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 1 passed, 4 deselected * ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest_subprocess('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED*",
        "test_only_benchmarks.py::test_slow PASSED*",
        "test_only_benchmarks.py::test_slower PASSED*",
        "test_only_benchmarks.py::test_xfast PASSED*",
        "test_only_benchmarks.py::test_fast PASSED*",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped * ======*",
    ])


def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest_subprocess('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])