1 #![allow(deprecated)]
2
3 use criterion;
4 use serde_json;
5
6 use criterion::{
7 criterion_group, criterion_main, profiler::Profiler, BatchSize, Benchmark, BenchmarkId,
8 Criterion, Fun, ParameterizedBenchmark, SamplingMode, Throughput,
9 };
10 use serde_json::value::Value;
11 use std::cell::{Cell, RefCell};
12 use std::cmp::max;
13 use std::fs::File;
14 use std::path::{Path, PathBuf};
15 use std::rc::Rc;
16 use std::time::{Duration, SystemTime};
17 use tempfile::{tempdir, TempDir};
18 use walkdir::WalkDir;
19
/*
 * Please note that these tests are not complete examples of how to use
 * Criterion.rs. See the benches folder for actual examples.
 */
temp_dir() -> TempDir24 fn temp_dir() -> TempDir {
25 tempdir().unwrap()
26 }
27
28 // Configure a Criterion struct to perform really fast benchmarks. This is not
29 // recommended for real benchmarking, only for testing.
short_benchmark(dir: &TempDir) -> Criterion30 fn short_benchmark(dir: &TempDir) -> Criterion {
31 Criterion::default()
32 .output_directory(dir.path())
33 .warm_up_time(Duration::from_millis(250))
34 .measurement_time(Duration::from_millis(500))
35 .nresamples(2000)
36 .with_plots()
37 }
38
/// Shared call counter used to verify how many times a benchmark closure runs.
/// Clones share the same underlying count via `Rc`.
#[derive(Clone, Default)]
struct Counter {
    counter: Rc<RefCell<usize>>,
}
impl Counter {
    /// Increment the shared count by one.
    fn count(&self) {
        // Auto-deref through Rc makes the explicit `(*self.counter)` unnecessary.
        *self.counter.borrow_mut() += 1;
    }

    /// Read the current count.
    fn read(&self) -> usize {
        *self.counter.borrow()
    }
}
59
/// Assert that `dir`/`path` exists, is a regular file, and is non-empty.
/// Returns the full path for further inspection.
///
/// Takes `&Path` rather than `&PathBuf`; existing `&PathBuf` call sites still
/// work via deref coercion.
fn verify_file(dir: &Path, path: &str) -> PathBuf {
    let full_path = dir.join(path);
    assert!(
        full_path.is_file(),
        "File {:?} does not exist or is not a file",
        full_path
    );
    let metadata = full_path.metadata().unwrap();
    assert!(metadata.len() > 0, "File {:?} is empty", full_path);
    full_path
}
71
verify_json(dir: &PathBuf, path: &str)72 fn verify_json(dir: &PathBuf, path: &str) {
73 let full_path = verify_file(dir, path);
74 let f = File::open(full_path).unwrap();
75 serde_json::from_reader::<File, Value>(f).unwrap();
76 }
77
verify_svg(dir: &PathBuf, path: &str)78 fn verify_svg(dir: &PathBuf, path: &str) {
79 verify_file(dir, path);
80 }
81
verify_html(dir: &PathBuf, path: &str)82 fn verify_html(dir: &PathBuf, path: &str) {
83 verify_file(dir, path);
84 }
85
verify_stats(dir: &PathBuf, baseline: &str)86 fn verify_stats(dir: &PathBuf, baseline: &str) {
87 verify_json(&dir, &format!("{}/estimates.json", baseline));
88 verify_json(&dir, &format!("{}/sample.json", baseline));
89 verify_json(&dir, &format!("{}/tukey.json", baseline));
90 verify_json(&dir, &format!("{}/benchmark.json", baseline));
91 verify_file(&dir, &format!("{}/raw.csv", baseline));
92 }
93
/// Assert that `dir`/`path` does not exist at all (file or directory).
fn verify_not_exists(dir: &Path, path: &str) {
    assert!(
        !dir.join(path).exists(),
        "Path {:?} should not exist",
        dir.join(path)
    );
}
97
latest_modified(dir: &PathBuf) -> SystemTime98 fn latest_modified(dir: &PathBuf) -> SystemTime {
99 let mut newest_update: Option<SystemTime> = None;
100 for entry in WalkDir::new(dir) {
101 let entry = entry.unwrap();
102 let modified = entry.metadata().unwrap().modified().unwrap();
103 newest_update = match newest_update {
104 Some(latest) => Some(max(latest, modified)),
105 None => Some(modified),
106 };
107 }
108
109 newest_update.expect("failed to find a single time in directory")
110 }
111
#[test]
fn test_creates_directory() {
    // Running any benchmark must create a matching output directory.
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir);
    criterion.bench_function("test_creates_directory", |b| b.iter(|| 10));
    assert!(dir.path().join("test_creates_directory").is_dir());
}
118
#[test]
fn test_without_plots() {
    let dir = temp_dir();
    short_benchmark(&dir)
        .without_plots()
        .bench_function("test_without_plots", |b| b.iter(|| 10));

    // With plots disabled, no file under the output directory may carry an
    // .svg extension.
    for entry in WalkDir::new(dir.path().join("test_without_plots")) {
        let entry = entry.ok();
        let svg_found = entry
            .as_ref()
            .and_then(|e| e.path().extension())
            .and_then(|ext| ext.to_str())
            .map_or(false, |ext| ext == "svg");
        assert!(
            !svg_found,
            "Found SVG file ({:?}) in output directory with plots disabled",
            entry.unwrap().file_name()
        );
    }
}
141
#[test]
fn test_save_baseline() {
    let dir = temp_dir();
    println!("tmp directory is {:?}", dir.path());

    let mut criterion = short_benchmark(&dir).save_baseline("some-baseline".to_owned());
    criterion.bench_function("test_save_baseline", |b| b.iter(|| 10));

    // Saving writes stats under the named baseline, not under "base".
    let bench_dir = dir.path().join("test_save_baseline");
    verify_stats(&bench_dir, "some-baseline");
    verify_not_exists(&bench_dir, "base");
}
155
#[test]
fn test_retain_baseline() {
    // Populate the baseline with an initial run.
    let dir = temp_dir();
    short_benchmark(&dir)
        .save_baseline("some-baseline".to_owned())
        .bench_function("test_retain_baseline", |b| b.iter(|| 10));

    let baseline_dir = dir.path().join("test_retain_baseline/some-baseline");
    let pre_modified = latest_modified(&baseline_dir);

    // A retain run compares against the baseline without rewriting it.
    short_benchmark(&dir)
        .retain_baseline("some-baseline".to_owned())
        .bench_function("test_retain_baseline", |b| b.iter(|| 10));

    let post_modified = latest_modified(&baseline_dir);

    assert_eq!(pre_modified, post_modified, "baseline modified by retain");
}
174
#[test]
#[should_panic(expected = "Baseline 'some-baseline' must exist before comparison is allowed")]
fn test_compare_baseline() {
    // Comparing against a baseline that was never saved must panic.
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir).retain_baseline("some-baseline".to_owned());
    criterion.bench_function("test_compare_baseline", |b| b.iter(|| 10));
}
184
#[test]
fn test_sample_size() {
    let dir = temp_dir();
    let counter = Counter::default();

    let probe = counter.clone();
    short_benchmark(&dir)
        .sample_size(50)
        .bench_function("test_sample_size", move |b| {
            probe.count();
            b.iter(|| 10)
        });

    // The closure runs once per sample plus extra times during warmup, so
    // the total must exceed the configured sample size.
    assert!(counter.read() > 50);
}
202
#[test]
fn test_warmup_time() {
    let dir = temp_dir();

    // A longer warmup period must invoke the benchmark closure more often.
    let short_counter = Counter::default();
    let probe = short_counter.clone();
    short_benchmark(&dir)
        .warm_up_time(Duration::from_millis(100))
        .bench_function("test_warmup_time_1", move |b| {
            probe.count();
            b.iter(|| 10)
        });

    let long_counter = Counter::default();
    let probe = long_counter.clone();
    short_benchmark(&dir)
        .warm_up_time(Duration::from_millis(2000))
        .bench_function("test_warmup_time_2", move |b| {
            probe.count();
            b.iter(|| 10)
        });

    assert!(short_counter.read() < long_counter.read());
}
227
#[test]
fn test_measurement_time() {
    let dir = temp_dir();

    // A longer measurement period must run more iterations of the closure.
    let short_counter = Counter::default();
    let probe = short_counter.clone();
    short_benchmark(&dir)
        .measurement_time(Duration::from_millis(100))
        .bench_function("test_meas_time_1", move |b| b.iter(|| probe.count()));

    let long_counter = Counter::default();
    let probe = long_counter.clone();
    short_benchmark(&dir)
        .measurement_time(Duration::from_millis(2000))
        .bench_function("test_meas_time_2", move |b| b.iter(|| probe.count()));

    assert!(short_counter.read() < long_counter.read());
}
246
#[test]
fn test_bench_function() {
    // Smoke test: bench_function completes without panicking.
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir);
    criterion.bench_function("test_bench_function", |b| b.iter(|| 10));
}
252
#[test]
fn test_bench_functions() {
    // Smoke test for the deprecated Fun-based comparison API.
    let dir = temp_dir();
    let functions = vec![
        Fun::new("times 10", |b, i| b.iter(|| *i * 10)),
        Fun::new("times 20", |b, i| b.iter(|| *i * 20)),
    ];
    short_benchmark(&dir).bench_functions("test_bench_functions", functions, 20);
}
263
#[test]
fn test_bench_function_over_inputs() {
    // Smoke test for the deprecated parameterized-input API.
    let dir = temp_dir();
    let inputs = vec![100, 1000];
    short_benchmark(&dir).bench_function_over_inputs(
        "test_bench_function_over_inputs",
        |b, i| b.iter(|| *i * 10),
        inputs,
    );
}
273
#[test]
fn test_filtering() {
    let dir = temp_dir();
    let counter = Counter::default();
    let probe = counter.clone();

    // A filter that matches nothing must skip the benchmark entirely:
    // the closure never runs and no output directory is created.
    short_benchmark(&dir)
        .with_filter("Foo")
        .bench_function("test_filtering", move |b| b.iter(|| probe.count()));

    assert_eq!(counter.read(), 0);
    assert!(!dir.path().join("test_filtering").is_dir());
}
287
#[test]
fn test_timing_loops() {
    // Exercise each Bencher timing-loop API and every BatchSize variant once.
    let dir = temp_dir();
    let benchmark = Benchmark::new("iter", |b| b.iter(|| 10))
        .with_function("iter_with_setup", |b| {
            b.iter_with_setup(|| vec![10], |v| v[0])
        })
        .with_function("iter_with_large_setup", |b| {
            b.iter_with_large_setup(|| vec![10], |v| v[0])
        })
        .with_function("iter_with_large_drop", |b| {
            b.iter_with_large_drop(|| vec![10; 100])
        })
        .with_function("iter_batched_small", |b| {
            b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput)
        })
        .with_function("iter_batched_large", |b| {
            b.iter_batched(|| vec![10], |v| v[0], BatchSize::LargeInput)
        })
        .with_function("iter_batched_per_iteration", |b| {
            b.iter_batched(|| vec![10], |v| v[0], BatchSize::PerIteration)
        })
        .with_function("iter_batched_one_batch", |b| {
            b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
        })
        .with_function("iter_batched_10_iterations", |b| {
            b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
        })
        .with_function("iter_batched_ref_small", |b| {
            b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::SmallInput)
        })
        .with_function("iter_batched_ref_large", |b| {
            b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::LargeInput)
        })
        .with_function("iter_batched_ref_per_iteration", |b| {
            b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::PerIteration)
        })
        .with_function("iter_batched_ref_one_batch", |b| {
            b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
        })
        .with_function("iter_batched_ref_10_iterations", |b| {
            b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
        });
    short_benchmark(&dir).bench("test_timing_loops", benchmark);
}
335
#[test]
fn test_throughput() {
    let dir = temp_dir();

    // Fixed throughput on a plain benchmark.
    let bytes_bench =
        Benchmark::new("strlen", |b| b.iter(|| "foo".len())).throughput(Throughput::Bytes(3));
    short_benchmark(&dir).bench("test_throughput_bytes", bytes_bench);

    // Per-input throughput on a parameterized benchmark.
    let elems_bench = ParameterizedBenchmark::new(
        "veclen",
        |b, v| b.iter(|| v.len()),
        vec![vec![1], vec![1, 2, 3]],
    )
    .throughput(|v| Throughput::Elements(v.len() as u64));
    short_benchmark(&dir).bench("test_throughput_elems", elems_bench);
}
353
// Verify that all expected output files are present
#[test]
fn test_output_files() {
    let tempdir = temp_dir();
    // Two identical runs are needed so comparison ("change") output is produced.
    for _ in 0..2 {
        short_benchmark(&tempdir).bench(
            "test_output",
            Benchmark::new("output_1", |b| b.iter(|| 10))
                .with_function("output_2", |b| b.iter(|| 20))
                .with_function("output_\\/*\"?", |b| b.iter(|| 30))
                .sampling_mode(SamplingMode::Linear),
        );
    }

    // For each benchmark, assert that the expected files are present.
    for idx in 0..3 {
        let subdir = if idx == 2 {
            // Special characters in names must have been replaced by underscores.
            String::from("test_output/output______")
        } else {
            format!("test_output/output_{}", idx + 1)
        };
        let dir = tempdir.path().join(subdir);

        verify_stats(&dir, "new");
        verify_stats(&dir, "base");
        verify_json(&dir, "change/estimates.json");

        if short_benchmark(&tempdir).can_plot() {
            verify_svg(&dir, "report/MAD.svg");
            verify_svg(&dir, "report/mean.svg");
            verify_svg(&dir, "report/median.svg");
            verify_svg(&dir, "report/pdf.svg");
            verify_svg(&dir, "report/regression.svg");
            verify_svg(&dir, "report/SD.svg");
            verify_svg(&dir, "report/slope.svg");
            verify_svg(&dir, "report/typical.svg");
            verify_svg(&dir, "report/both/pdf.svg");
            verify_svg(&dir, "report/both/regression.svg");
            verify_svg(&dir, "report/change/mean.svg");
            verify_svg(&dir, "report/change/median.svg");
            verify_svg(&dir, "report/change/t-test.svg");

            verify_svg(&dir, "report/pdf_small.svg");
            verify_svg(&dir, "report/regression_small.svg");
            verify_svg(&dir, "report/relative_pdf_small.svg");
            verify_svg(&dir, "report/relative_regression_small.svg");
            verify_html(&dir, "report/index.html");
        }
    }

    // Check for overall report files
    if short_benchmark(&tempdir).can_plot() {
        let dir = tempdir.path().join("test_output");
        verify_svg(&dir, "report/violin.svg");
        verify_html(&dir, "report/index.html");
    }

    // Run the final summary process and check for the report it produces.
    short_benchmark(&tempdir).final_summary();
    if short_benchmark(&tempdir).can_plot() {
        let dir = tempdir.path().to_owned();
        verify_html(&dir, "report/index.html");
    }
}
421
#[test]
fn test_output_files_flat_sampling() {
    let tempdir = temp_dir();
    // Two identical runs are needed so comparison ("change") output is produced.
    for _ in 0..2 {
        let flat_bench =
            Benchmark::new("output_flat", |b| b.iter(|| 10)).sampling_mode(SamplingMode::Flat);
        short_benchmark(&tempdir).bench("test_output", flat_bench);
    }

    let dir = tempdir.path().join("test_output/output_flat");

    verify_stats(&dir, "new");
    verify_stats(&dir, "base");
    verify_json(&dir, "change/estimates.json");

    // Flat sampling produces iteration-time plots in place of the
    // regression/slope plots that linear sampling produces.
    if short_benchmark(&tempdir).can_plot() {
        verify_svg(&dir, "report/MAD.svg");
        verify_svg(&dir, "report/mean.svg");
        verify_svg(&dir, "report/median.svg");
        verify_svg(&dir, "report/pdf.svg");
        verify_svg(&dir, "report/iteration_times.svg");
        verify_svg(&dir, "report/SD.svg");
        verify_svg(&dir, "report/typical.svg");
        verify_svg(&dir, "report/both/pdf.svg");
        verify_svg(&dir, "report/both/iteration_times.svg");
        verify_svg(&dir, "report/change/mean.svg");
        verify_svg(&dir, "report/change/median.svg");
        verify_svg(&dir, "report/change/t-test.svg");

        verify_svg(&dir, "report/pdf_small.svg");
        verify_svg(&dir, "report/iteration_times_small.svg");
        verify_svg(&dir, "report/relative_pdf_small.svg");
        verify_svg(&dir, "report/relative_iteration_times_small.svg");
        verify_html(&dir, "report/index.html");
    }
}
460
#[test]
#[should_panic(expected = "Benchmark function must call Bencher::iter or related method.")]
fn test_bench_with_no_iteration_panics() {
    // A benchmark closure that never starts a timing loop is a programmer
    // error and must panic.
    let dir = temp_dir();
    let no_iter = Benchmark::new("no_iter", |_b| {});
    short_benchmark(&dir).bench("test_no_iter", no_iter);
}
467
#[test]
fn test_benchmark_group_with_input() {
    // Smoke test for the benchmark-group API with per-input benchmarks.
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir);
    let mut group = criterion.benchmark_group("Test Group");
    for input in 0..2 {
        let id1 = BenchmarkId::new("Test 1", input);
        group.bench_with_input(id1, &input, |b, i| b.iter(|| i));
        let id2 = BenchmarkId::new("Test 2", input);
        group.bench_with_input(id2, &input, |b, i| b.iter(|| i));
    }
    group.finish();
}
479
#[test]
fn test_benchmark_group_without_input() {
    // Smoke test for the benchmark-group API with plain benchmarks.
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir);
    let mut group = criterion.benchmark_group("Test Group 2");
    group.bench_function("Test 1", |b| b.iter(|| 30));
    group.bench_function("Test 2", |b| b.iter(|| 20));
    group.finish();
}
489
#[test]
fn test_criterion_doesnt_panic_if_measured_time_is_zero() {
    let dir = temp_dir();
    let mut criterion = short_benchmark(&dir);
    // iter_custom reports whatever duration the closure returns; a measured
    // time of exactly zero must be tolerated rather than panicking.
    criterion.bench_function("zero_time", |bencher| {
        bencher.iter_custom(|_iters| Duration::new(0, 0))
    });
}
498
// Tests for the `criterion_group!` / `criterion_main!` macros themselves.
// Kept byte-identical apart from comments: macro invocations expand to items
// whose names the tests call directly, so reordering is not safe.
mod macros {
    use super::{criterion, criterion_group, criterion_main};

    #[test]
    #[should_panic(expected = "group executed")]
    fn criterion_main() {
        fn group() {}
        fn group2() {
            panic!("group executed");
        }

        // Expands to a local `fn main` that runs each group in order; the
        // panic from group2 proves both were invoked.
        criterion_main!(group, group2);

        main();
    }

    #[test]
    fn criterion_main_trailing_comma() {
        // make this a compile-only check
        // as the second logger initialization causes panic
        #[allow(dead_code)]
        fn group() {}
        #[allow(dead_code)]
        fn group2() {}

        // Only verifies the macro accepts a trailing comma.
        criterion_main!(group, group2,);

        // silence dead_code warning
        if false {
            main()
        }
    }

    #[test]
    #[should_panic(expected = "group executed")]
    fn criterion_group() {
        use self::criterion::Criterion;

        fn group(_crit: &mut Criterion) {}
        fn group2(_crit: &mut Criterion) {
            panic!("group executed");
        }

        // Expands to `fn test_group()` which runs each member in order.
        criterion_group!(test_group, group, group2);

        test_group();
    }

    #[test]
    #[should_panic(expected = "group executed")]
    fn criterion_group_trailing_comma() {
        use self::criterion::Criterion;

        fn group(_crit: &mut Criterion) {}
        fn group2(_crit: &mut Criterion) {
            panic!("group executed");
        }

        // Same as above, but with a trailing comma in the member list.
        criterion_group!(test_group, group, group2,);

        test_group();
    }
}
562
563 struct TestProfiler {
564 started: Rc<Cell<u32>>,
565 stopped: Rc<Cell<u32>>,
566 }
567 impl Profiler for TestProfiler {
start_profiling(&mut self, benchmark_id: &str, _benchmark_path: &Path)568 fn start_profiling(&mut self, benchmark_id: &str, _benchmark_path: &Path) {
569 assert!(benchmark_id.contains("profile_test"));
570 self.started.set(self.started.get() + 1);
571 }
stop_profiling(&mut self, benchmark_id: &str, _benchmark_path: &Path)572 fn stop_profiling(&mut self, benchmark_id: &str, _benchmark_path: &Path) {
573 assert!(benchmark_id.contains("profile_test"));
574 self.stopped.set(self.stopped.get() + 1);
575 }
576 }
577
// Verify that profilers are started and stopped as expected
#[test]
fn test_profiler_called() {
    let start_count = Rc::new(Cell::new(0u32));
    let stop_count = Rc::new(Cell::new(0u32));
    let profiler = TestProfiler {
        started: Rc::clone(&start_count),
        stopped: Rc::clone(&stop_count),
    };
    let dir = temp_dir();
    // profile_time switches criterion into profiling mode, which drives the
    // installed profiler exactly once around the benchmark.
    let mut criterion = short_benchmark(&dir)
        .with_profiler(profiler)
        .profile_time(Some(Duration::from_secs(1)));
    criterion.bench_function("profile_test", |b| b.iter(|| 10));
    assert_eq!(1, start_count.get());
    assert_eq!(1, stop_count.get());
}
595