1 //! A statistics-driven micro-benchmarking library written in Rust.
2 //!
3 //! This crate is a microbenchmarking library which aims to provide strong
4 //! statistical confidence in detecting and estimating the size of performance
5 //! improvements and regressions, while also being easy to use.
6 //!
7 //! See
8 //! [the user guide](https://bheisler.github.io/criterion.rs/book/index.html)
9 //! for examples, as well as details on the measurement and analysis process
10 //! and the output it produces.
11 //!
12 //! ## Features:
13 //! * Collects detailed statistics, providing strong confidence that changes
14 //!   to performance are real, not measurement noise.
15 //! * Produces detailed charts, providing thorough understanding of your code's
16 //!   performance behavior.
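//!
//! ## Quick start
//!
//! A minimal benchmark sketch; `fibonacci` here is just an illustrative stand-in for the code
//! you want to measure:
//!
//! ```rust
//! #[macro_use] extern crate criterion;
//! use self::criterion::*;
//!
//! fn fibonacci(n: u64) -> u64 {
//!     match n {
//!         0 | 1 => 1,
//!         n => fibonacci(n - 1) + fibonacci(n - 2),
//!     }
//! }
//!
//! fn criterion_benchmark(c: &mut Criterion) {
//!     c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20))));
//! }
//!
//! criterion_group!(benches, criterion_benchmark);
//! criterion_main!(benches);
//! ```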
17 
18 #![warn(missing_docs)]
19 #![warn(bare_trait_objects)]
20 #![cfg_attr(feature = "real_blackbox", feature(test))]
21 #![cfg_attr(
22     feature = "cargo-clippy",
23     allow(
24         clippy::just_underscores_and_digits, // Used in the stats code
25         clippy::transmute_ptr_to_ptr, // Used in the stats code
26         clippy::manual_non_exhaustive, // Remove when MSRV bumped above 1.40
27     )
28 )]
29 
30 #[cfg(test)]
31 extern crate approx;
32 
33 #[cfg(test)]
34 extern crate quickcheck;
35 
36 use clap::value_t;
37 use regex::Regex;
38 
39 #[macro_use]
40 extern crate lazy_static;
41 
42 #[cfg(feature = "real_blackbox")]
43 extern crate test;
44 
45 #[macro_use]
46 extern crate serde_derive;
47 
48 // Needs to be declared before other modules
49 // in order to be usable there.
50 #[macro_use]
51 mod macros_private;
52 #[macro_use]
53 mod analysis;
54 mod benchmark;
55 #[macro_use]
56 mod benchmark_group;
57 pub mod async_executor;
58 mod bencher;
59 mod connection;
60 mod csv_report;
61 mod error;
62 mod estimate;
63 mod format;
64 mod fs;
65 mod html;
66 mod kde;
67 mod macros;
68 pub mod measurement;
69 mod plot;
70 pub mod profiler;
71 mod report;
72 mod routine;
73 mod stats;
74 
75 use std::cell::RefCell;
76 use std::collections::HashSet;
77 use std::default::Default;
78 use std::env;
79 use std::fmt;
80 use std::iter::IntoIterator;
81 use std::marker::PhantomData;
82 use std::net::TcpStream;
83 use std::path::{Path, PathBuf};
84 use std::process::Command;
85 use std::sync::{Mutex, MutexGuard};
86 use std::time::Duration;
87 
88 use criterion_plot::{Version, VersionError};
89 
90 use crate::benchmark::BenchmarkConfig;
91 use crate::benchmark::NamedRoutine;
92 use crate::connection::Connection;
93 use crate::connection::OutgoingMessage;
94 use crate::csv_report::FileCsvReport;
95 use crate::html::Html;
96 use crate::measurement::{Measurement, WallTime};
97 use crate::plot::{Gnuplot, Plotter, PlottersBackend};
98 use crate::profiler::{ExternalProfiler, Profiler};
99 use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
100 use crate::routine::Function;
101 
102 #[cfg(feature = "async")]
103 pub use crate::bencher::AsyncBencher;
104 pub use crate::bencher::Bencher;
105 #[allow(deprecated)]
106 pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
107 pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
108 
109 lazy_static! {
110     static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
111     static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
112     static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
113         match &*GNUPLOT_VERSION {
114             Ok(_) => PlottingBackend::Gnuplot,
115             Err(e) => {
116                 match e {
117                     VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
118                     e => println!(
119                         "Gnuplot not found or not usable, using plotters backend\n{}",
120                         e
121                     ),
122                 };
123                 PlottingBackend::Plotters
124             }
125         }
126     };
127     static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
128         match std::env::var("CARGO_CRITERION_PORT") {
129             Ok(port_str) => {
130                 let port: u16 = port_str.parse().ok()?;
131                 let stream = TcpStream::connect(("localhost", port)).ok()?;
132                 Some(Mutex::new(Connection::new(stream).ok()?))
133             }
134             Err(_) => None,
135         }
136     };
137     static ref DEFAULT_OUTPUT_DIRECTORY: PathBuf = {
138         // Set criterion home to (in descending order of preference):
139         // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
140         // - $CARGO_TARGET_DIR/criterion
141         // - the cargo target dir from `cargo metadata`
142         // - ./target/criterion
143         if let Some(value) = env::var_os("CRITERION_HOME") {
144             PathBuf::from(value)
145         } else if let Some(path) = cargo_target_directory() {
146             path.join("criterion")
147         } else {
148             PathBuf::from("target/criterion")
149         }
150     };
151 }
152 
153 fn debug_enabled() -> bool {
154     *DEBUG_ENABLED
155 }
156 
157 /// A function that is opaque to the optimizer, used to prevent the compiler from
158 /// optimizing away computations in a benchmark.
159 ///
160 /// This variant is backed by the (unstable) test::black_box function.
161 #[cfg(feature = "real_blackbox")]
162 pub fn black_box<T>(dummy: T) -> T {
163     test::black_box(dummy)
164 }
165 
166 /// A function that is opaque to the optimizer, used to prevent the compiler from
167 /// optimizing away computations in a benchmark.
168 ///
169 /// This variant is stable-compatible, but it may cause some performance overhead
170 /// or fail to prevent code from being eliminated.
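///
/// # Example
///
/// A minimal sketch: pass values through `black_box` inside the closure given to
/// `Bencher::iter` so that the compiler cannot constant-fold the computation away.
///
/// ```rust
/// # #[macro_use] extern crate criterion;
/// # use self::criterion::*;
/// fn bench(c: &mut Criterion) {
///     c.bench_function("add", |b| b.iter(|| black_box(2u64) + black_box(40u64)));
/// }
/// # criterion_group!(benches, bench);
/// # criterion_main!(benches);
/// ```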
171 #[cfg(not(feature = "real_blackbox"))]
172 pub fn black_box<T>(dummy: T) -> T {
173     unsafe {
174         let ret = std::ptr::read_volatile(&dummy);
175         std::mem::forget(dummy);
176         ret
177     }
178 }
179 
180 /// Represents a function to benchmark, together with a name for that function.
181 /// Used together with `bench_functions` to represent one out of multiple functions
182 /// under benchmark.
183 #[doc(hidden)]
184 pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
185     f: NamedRoutine<I, M>,
186     _phantom: PhantomData<M>,
187 }
188 
189 impl<I, M: Measurement> Fun<I, M>
190 where
191     I: fmt::Debug + 'static,
192 {
193     /// Create a new `Fun` given a name and a closure
194     pub fn new<F>(name: &str, f: F) -> Fun<I, M>
195     where
196         F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
197     {
198         let routine = NamedRoutine {
199             id: name.to_owned(),
200             f: Box::new(RefCell::new(Function::new(f))),
201         };
202 
203         Fun {
204             f: routine,
205             _phantom: PhantomData,
206         }
207     }
208 }
209 
210 /// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
211 /// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
212 /// batch size.
213 ///
214 /// Generally speaking, almost all benchmarks should use `SmallInput`. If the input or the result
215 /// of the benchmark routine is large enough that `SmallInput` causes out-of-memory errors,
216 /// `LargeInput` can be used to reduce memory usage at the cost of increasing the measurement
217 /// overhead. If the input or the result is extremely large (or if it holds some
218 /// limited external resource like a file handle), `PerIteration` will set the number of iterations
219 /// per batch to exactly one. `PerIteration` can increase the measurement overhead substantially
220 /// and should be avoided wherever possible.
221 ///
222 /// Each value lists an estimate of the measurement overhead. This is intended as a rough guide
223 /// to assist in choosing an option; it should not be relied upon. In particular, it is not valid
224 /// to subtract the listed overhead from the measurement and assume that the result represents the
225 /// true runtime of a function. The actual measurement overhead for your specific benchmark depends
226 /// on the details of the function you're benchmarking and the hardware and operating
227 /// system running the benchmark.
228 ///
229 /// With that said, if the runtime of your function is small relative to the measurement overhead
230 /// it will be difficult to take accurate measurements. In this situation, the best option is to use
231 /// [`Bencher::iter`](struct.Bencher.html#method.iter) which has next-to-zero measurement overhead.
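///
/// # Example
///
/// A minimal sketch of `Bencher::iter_batched`: the setup closure builds a fresh input for every
/// iteration of the routine, and its cost is excluded from the measurement. The vector being
/// sorted here is purely illustrative.
///
/// ```rust
/// # #[macro_use] extern crate criterion;
/// # use self::criterion::*;
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sort", |b| {
///         b.iter_batched(
///             || vec![3u32, 1, 2],        // setup: not measured
///             |mut v| v.sort_unstable(),  // routine: measured
///             BatchSize::SmallInput,
///         )
///     });
/// }
/// # criterion_group!(benches, bench);
/// # criterion_main!(benches);
/// ```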
232 #[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
233 pub enum BatchSize {
234     /// `SmallInput` indicates that the input to the benchmark routine (the value returned from
235     /// the setup routine) is small enough that millions of values can be safely held in memory.
236     /// Always prefer `SmallInput` unless the benchmark is using too much memory.
237     ///
238     /// In testing, the maximum measurement overhead from benchmarking with `SmallInput` is on the
239     /// order of 500 picoseconds. This is presented as a rough guide; your results may vary.
240     SmallInput,
241 
242     /// `LargeInput` indicates that the input to the benchmark routine or the value returned from
243     /// that routine is large. This will reduce the memory usage but increase the measurement
244     /// overhead.
245     ///
246     /// In testing, the maximum measurement overhead from benchmarking with `LargeInput` is on the
247     /// order of 750 picoseconds. This is presented as a rough guide; your results may vary.
248     LargeInput,
249 
250     /// `PerIteration` indicates that the input to the benchmark routine or the value returned from
251     /// that routine is extremely large or holds some limited resource, such that holding many values
252     /// in memory at once is infeasible. This provides the worst measurement overhead, but the
253     /// lowest memory usage.
254     ///
255     /// In testing, the maximum measurement overhead from benchmarking with `PerIteration` is on the
256     /// order of 350 nanoseconds or 350,000 picoseconds. This is presented as a rough guide; your
257     /// results may vary.
258     PerIteration,
259 
260     /// `NumBatches` will attempt to divide the iterations up into a given number of batches.
261     /// A larger number of batches (and thus smaller batches) will reduce memory usage but increase
262     /// measurement overhead. This allows the user to choose their own tradeoff between memory usage
263     /// and measurement overhead, but care must be taken in tuning the number of batches. Most
264     /// benchmarks should use `SmallInput` or `LargeInput` instead.
265     NumBatches(u64),
266 
267     /// `NumIterations` fixes the batch size to a constant number, specified by the user. This
268     /// allows the user to choose their own tradeoff between overhead and memory usage, but care must
269     /// be taken in tuning the batch size. In general, the measurement overhead of `NumIterations`
270     /// will be larger than that of `NumBatches`. Most benchmarks should use `SmallInput` or
271     /// `LargeInput` instead.
272     NumIterations(u64),
273 
274     #[doc(hidden)]
275     __NonExhaustive,
276 }
277 impl BatchSize {
278     /// Convert to a number of iterations per batch.
279     ///
280     /// We try to do a constant number of batches regardless of the number of iterations in this
281     /// sample. If the measurement overhead is roughly constant regardless of the number of
282     /// iterations the analysis of the results later will have an easier time separating the
283     /// measurement overhead from the benchmark time.
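    ///
    /// For example, `SmallInput` with `iters = 1000` yields `(1000 + 10 - 1) / 10 = 100`
    /// iterations per batch, i.e. roughly 10 batches per sample.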
284     fn iters_per_batch(self, iters: u64) -> u64 {
285         match self {
286             BatchSize::SmallInput => (iters + 10 - 1) / 10,
287             BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
288             BatchSize::PerIteration => 1,
289             BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
290             BatchSize::NumIterations(size) => size,
291             BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
292         }
293     }
294 }
295 
296 /// Baseline describes how the `baseline_directory` is handled.
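///
/// A baseline is normally selected through `Criterion::save_baseline` /
/// `Criterion::retain_baseline` or the `--save-baseline` / `--baseline` command-line flags
/// rather than by constructing this enum directly. A minimal sketch (the baseline name
/// "before" is illustrative):
///
/// ```rust
/// use criterion::Criterion;
///
/// // Record the current results under the name "before"...
/// let _save = Criterion::default().save_baseline("before".to_owned());
/// // ...and later compare a new run against them.
/// let _compare = Criterion::default().retain_baseline("before".to_owned());
/// ```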
297 #[derive(Debug, Clone, Copy)]
298 pub enum Baseline {
299     /// Compare ensures a previously saved version of the baseline
300     /// exists and runs a comparison against it.
301     Compare,
302     /// Save writes the benchmark results to the baseline directory,
303     /// overwriting any results that were previously there.
304     Save,
305 }
306 
307 /// Enum used to select the plotting backend.
308 #[derive(Debug, Clone, Copy)]
309 pub enum PlottingBackend {
310     /// Plotting backend which uses the external `gnuplot` command to render plots. This is the
311     /// default if the `gnuplot` command is installed.
312     Gnuplot,
313     /// Plotting backend which uses the Rust 'Plotters' library. This is the default if `gnuplot`
314     /// is not installed.
315     Plotters,
316 }
317 impl PlottingBackend {
318     fn create_plotter(&self) -> Box<dyn Plotter> {
319         match self {
320             PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
321             PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
322         }
323     }
324 }
325 
326 #[derive(Debug, Clone)]
327 /// Enum representing the execution mode.
328 pub(crate) enum Mode {
329     /// Run benchmarks normally.
330     Benchmark,
331     /// List all benchmarks but do not run them.
332     List,
333     /// Run benchmarks once to verify that they work, but otherwise do not measure them.
334     Test,
335     /// Iterate benchmarks for a given length of time but do not analyze or report on them.
336     Profile(Duration),
337 }
338 impl Mode {
339     pub fn is_benchmark(&self) -> bool {
340         matches!(self, Mode::Benchmark)
341     }
342 }
343 
344 /// The benchmark manager
345 ///
346 /// `Criterion` lets you configure and execute benchmarks
347 ///
348 /// Each benchmark consists of four phases:
349 ///
350 /// - **Warm-up**: The routine is repeatedly executed, to let the CPU/OS/JIT/interpreter adapt to
351 /// the new load
352 /// - **Measurement**: The routine is repeatedly executed, and timing information is collected into
353 /// a sample
354 /// - **Analysis**: The sample is analyzed and distilled into meaningful statistics that get
355 /// reported to stdout, stored in files, and plotted
356 /// - **Comparison**: The current sample is compared with the sample obtained in the previous
357 /// benchmark.
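///
/// # Example
///
/// A sketch of overriding the default configuration through `criterion_group!`; the specific
/// settings shown here are illustrative:
///
/// ```rust
/// # #[macro_use] extern crate criterion;
/// # use self::criterion::*;
/// # use std::time::Duration;
/// fn bench(c: &mut Criterion) {
///     c.bench_function("noop", |b| b.iter(|| ()));
/// }
///
/// criterion_group! {
///     name = benches;
///     config = Criterion::default()
///         .sample_size(200)
///         .measurement_time(Duration::from_secs(10));
///     targets = bench
/// }
/// criterion_main!(benches);
/// ```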
358 pub struct Criterion<M: Measurement = WallTime> {
359     config: BenchmarkConfig,
360     filter: Option<Regex>,
361     report: Reports,
362     output_directory: PathBuf,
363     baseline_directory: String,
364     baseline: Baseline,
365     load_baseline: Option<String>,
366     all_directories: HashSet<String>,
367     all_titles: HashSet<String>,
368     measurement: M,
369     profiler: Box<RefCell<dyn Profiler>>,
370     connection: Option<MutexGuard<'static, Connection>>,
371     mode: Mode,
372 }
373 
374 /// Returns the Cargo target directory, possibly calling `cargo metadata` to
375 /// figure it out.
376 fn cargo_target_directory() -> Option<PathBuf> {
377     #[derive(Deserialize)]
378     struct Metadata {
379         target_directory: PathBuf,
380     }
381 
382     env::var_os("CARGO_TARGET_DIR")
383         .map(PathBuf::from)
384         .or_else(|| {
385             let output = Command::new(env::var_os("CARGO")?)
386                 .args(&["metadata", "--format-version", "1"])
387                 .output()
388                 .ok()?;
389             let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
390             Some(metadata.target_directory)
391         })
392 }
393 
394 impl Default for Criterion {
395     /// Creates a benchmark manager with the following default settings:
396     ///
397     /// - Sample size: 100 measurements
398     /// - Warm-up time: 3 s
399     /// - Measurement time: 5 s
400     /// - Bootstrap size: 100 000 resamples
401     /// - Noise threshold: 0.01 (1%)
402     /// - Confidence level: 0.95
403     /// - Significance level: 0.05
404     /// - Plotting: enabled, using gnuplot if available or plotters if gnuplot is not available
405     /// - No filter
406     fn default() -> Criterion {
407         let reports = Reports {
408             cli_enabled: true,
409             cli: CliReport::new(false, false, false),
410             bencher_enabled: false,
411             bencher: BencherReport,
412             html_enabled: true,
413             html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
414             csv_enabled: true,
415             csv: FileCsvReport,
416         };
417 
418         let mut criterion = Criterion {
419             config: BenchmarkConfig {
420                 confidence_level: 0.95,
421                 measurement_time: Duration::new(5, 0),
422                 noise_threshold: 0.01,
423                 nresamples: 100_000,
424                 sample_size: 100,
425                 significance_level: 0.05,
426                 warm_up_time: Duration::new(3, 0),
427                 sampling_mode: SamplingMode::Auto,
428             },
429             filter: None,
430             report: reports,
431             baseline_directory: "base".to_owned(),
432             baseline: Baseline::Save,
433             load_baseline: None,
434             output_directory: DEFAULT_OUTPUT_DIRECTORY.clone(),
435             all_directories: HashSet::new(),
436             all_titles: HashSet::new(),
437             measurement: WallTime,
438             profiler: Box::new(RefCell::new(ExternalProfiler)),
439             connection: CARGO_CRITERION_CONNECTION
440                 .as_ref()
441                 .map(|mtx| mtx.lock().unwrap()),
442             mode: Mode::Benchmark,
443         };
444 
445         if criterion.connection.is_some() {
446             // disable all reports when connected to cargo-criterion; it will do the reporting.
447             criterion.report.cli_enabled = false;
448             criterion.report.bencher_enabled = false;
449             criterion.report.csv_enabled = false;
450             criterion.report.html_enabled = false;
451         }
452         criterion
453     }
454 }
455 
456 impl<M: Measurement> Criterion<M> {
457     /// Changes the measurement for the benchmarks run with this runner. See the
458     /// Measurement trait for more details.
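    ///
    /// A minimal sketch; `WallTime` is already the default and stands in here for a custom
    /// `Measurement` implementation:
    ///
    /// ```rust
    /// use criterion::Criterion;
    /// use criterion::measurement::WallTime;
    ///
    /// let criterion = Criterion::default().with_measurement(WallTime);
    /// ```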
459     pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
460         // Can't use struct update syntax here because they're technically different types.
461         Criterion {
462             config: self.config,
463             filter: self.filter,
464             report: self.report,
465             baseline_directory: self.baseline_directory,
466             baseline: self.baseline,
467             load_baseline: self.load_baseline,
468             output_directory: self.output_directory,
469             all_directories: self.all_directories,
470             all_titles: self.all_titles,
471             measurement: m,
472             profiler: self.profiler,
473             connection: self.connection,
474             mode: self.mode,
475         }
476     }
477 
478     /// Changes the internal profiler for benchmarks run with this runner. See
479     /// the Profiler trait for more details.
480     pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
481         Criterion {
482             profiler: Box::new(RefCell::new(p)),
483             ..self
484         }
485     }
486 
487     /// Set the plotting backend. By default, Criterion will use gnuplot if available, or plotters
488     /// if not.
489     ///
490     /// Panics if `backend` is `PlottingBackend::Gnuplot` and gnuplot is not available.
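    ///
    /// A minimal sketch forcing the pure-Rust backend:
    ///
    /// ```rust
    /// use criterion::{Criterion, PlottingBackend};
    ///
    /// let criterion = Criterion::default().plotting_backend(PlottingBackend::Plotters);
    /// ```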
491     pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
492         if let PlottingBackend::Gnuplot = backend {
493             if GNUPLOT_VERSION.is_err() {
494                 panic!("Gnuplot plotting backend was requested, but gnuplot is not available. To continue, either install Gnuplot or allow Criterion.rs to fall back to using plotters.");
495             }
496         }
497 
498         self.report.html = Html::new(backend.create_plotter());
499         self
500     }
501 
502     /// Changes the default size of the sample for benchmarks run with this runner.
503     ///
504     /// A bigger sample should yield more accurate results if paired with a sufficiently large
505     /// measurement time.
506     ///
507     /// Sample size must be at least 10.
508     ///
509     /// # Panics
510     ///
511     /// Panics if n < 10
512     pub fn sample_size(mut self, n: usize) -> Criterion<M> {
513         assert!(n >= 10);
514 
515         self.config.sample_size = n;
516         self
517     }
518 
519     /// Changes the default warm up time for benchmarks run with this runner.
520     ///
521     /// # Panics
522     ///
523     /// Panics if the input duration is zero
524     pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
525         assert!(dur.to_nanos() > 0);
526 
527         self.config.warm_up_time = dur;
528         self
529     }
530 
531     /// Changes the default measurement time for benchmarks run with this runner.
532     ///
533     /// With a longer time, the measurement will become more resilient to transitory peak loads
534     /// caused by external programs
535     ///
536     /// **Note**: If the measurement time is too "low", Criterion will automatically increase it
537     ///
538     /// # Panics
539     ///
540     /// Panics if the input duration is zero
541     pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
542         assert!(dur.to_nanos() > 0);
543 
544         self.config.measurement_time = dur;
545         self
546     }
547 
548     /// Changes the default number of resamples for benchmarks run with this runner.
549     ///
550     /// Number of resamples to use for the
551     /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
552     ///
553     /// A larger number of resamples reduces the random sampling errors, which are inherent to the
554     /// bootstrap method, but also increases the analysis time
555     ///
556     /// # Panics
557     ///
558     /// Panics if the number of resamples is set to zero
559     pub fn nresamples(mut self, n: usize) -> Criterion<M> {
560         assert!(n > 0);
561         if n <= 1000 {
562             println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
563         }
564 
565         self.config.nresamples = n;
566         self
567     }
568 
569     /// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
570     /// is used to filter out small changes in performance, even if they are statistically
571     /// significant. Sometimes benchmarking the same code twice will result in small but
572     /// statistically significant differences solely because of noise. This provides a way to filter
573     /// out some of these false positives at the cost of making it harder to detect small changes
574     /// to the true performance of the benchmark.
575     ///
576     /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
577     ///
578     /// # Panics
579     ///
580     /// Panics if the threshold is set to a negative value
581     pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
582         assert!(threshold >= 0.0);
583 
584         self.config.noise_threshold = threshold;
585         self
586     }
587 
588     /// Changes the default confidence level for benchmarks run with this runner. The confidence
589     /// level is the desired probability that the true runtime lies within the estimated
590     /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
591     /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
592     ///
593     /// # Panics
594     ///
595     /// Panics if the confidence level is set to a value outside the `(0, 1)` range
596     pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
597         assert!(cl > 0.0 && cl < 1.0);
598         if cl < 0.5 {
599             println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
600         }
601 
602         self.config.confidence_level = cl;
603         self
604     }
605 
606     /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
607     /// for benchmarks run with this runner. This is used to perform a
608     /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
609     /// the measurements from this run are different from the measured performance of the last run.
610     /// The significance level is the desired probability that two measurements of identical code
611     /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
612     /// meaning that approximately 5% of identical benchmarks will register as different due to
613     /// noise.
614     ///
615     /// This presents a trade-off. By setting the significance level closer to 0.0, you increase
616     /// the statistical robustness against noise, but you also weaken Criterion.rs' ability to
617     /// detect small but real changes in performance. By setting the significance level
618     /// closer to 1.0, Criterion.rs will be better able to detect small true changes, but it will
619     /// also report more spurious differences.
620     ///
621     /// See also the noise threshold setting.
622     ///
623     /// # Panics
624     ///
625     /// Panics if the significance level is set to a value outside the `(0, 1)` range
626     pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
627         assert!(sl > 0.0 && sl < 1.0);
628 
629         self.config.significance_level = sl;
630         self
631     }
632 
633     /// Enables plotting
634     pub fn with_plots(mut self) -> Criterion<M> {
635         // If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
636         if self.connection.is_none() {
637             self.report.html_enabled = true;
638         }
639         self
640     }
641 
642     /// Disables plotting
643     pub fn without_plots(mut self) -> Criterion<M> {
644         self.report.html_enabled = false;
645         self
646     }
647 
648     /// Return true if generation of the plots is possible.
649     #[deprecated(
650         since = "0.3.4",
651         note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
652     )]
653     pub fn can_plot(&self) -> bool {
654         // Trivially true now that we have plotters.
655         // TODO: Remove this once the deprecation period is over.
656         true
657     }
658 
659     /// Names an explicit baseline and enables overwriting the previous results.
660     pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
661         self.baseline_directory = baseline;
662         self.baseline = Baseline::Save;
663         self
664     }
665 
666     /// Names an explicit baseline and disables overwriting the previous results.
667     pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
668         self.baseline_directory = baseline;
669         self.baseline = Baseline::Compare;
670         self
671     }
672 
673     /// Filters the benchmarks. Only benchmarks whose names contain a match for the given
674     /// regular expression will be executed.
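    ///
    /// A minimal sketch; the pattern is interpreted as a regular expression, so `"^fib"` keeps
    /// only the benchmarks whose IDs start with `fib` (the pattern is illustrative):
    ///
    /// ```rust
    /// use criterion::Criterion;
    ///
    /// let criterion = Criterion::default().with_filter("^fib");
    /// ```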
675     pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
676         let filter_text = filter.into();
677         let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
678             panic!(
679                 "Unable to parse '{}' as a regular expression: {}",
680                 filter_text, err
681             )
682         });
683         self.filter = Some(filter);
684 
685         self
686     }
687 
688     /// Override whether the CLI output will be colored or not. Usually you would use the `--color`
689     /// CLI argument, but this is available for programmatic use as well.
690     pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
691         self.report.cli.enable_text_coloring = enabled;
692         self
693     }
694 
695     /// Set the output directory (currently for testing only)
696     #[doc(hidden)]
697     pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
698         self.output_directory = path.to_owned();
699 
700         self
701     }
702 
703     /// Set the profile time (currently for testing only)
704     #[doc(hidden)]
705     pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
706         match profile_time {
707             Some(time) => self.mode = Mode::Profile(time),
708             None => self.mode = Mode::Benchmark,
709         }
710 
711         self
712     }
713 
714     /// Generate the final summary at the end of a run.
715     #[doc(hidden)]
716     pub fn final_summary(&self) {
717         if !self.mode.is_benchmark() {
718             return;
719         }
720 
721         let report_context = ReportContext {
722             output_directory: self.output_directory.clone(),
723             plot_config: PlotConfiguration::default(),
724         };
725 
726         self.report.final_summary(&report_context);
727     }
728 
729     /// Configure this criterion struct based on the command-line arguments to
730     /// this process.
731     #[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
732     pub fn configure_from_args(mut self) -> Criterion<M> {
733         use clap::{App, Arg};
734         let matches = App::new("Criterion Benchmark")
735             .arg(Arg::with_name("FILTER")
736                 .help("Skip benchmarks whose names do not contain FILTER.")
737                 .index(1))
738             .arg(Arg::with_name("color")
739                 .short("c")
740                 .long("color")
741                 .alias("colour")
742                 .takes_value(true)
743                 .possible_values(&["auto", "always", "never"])
744                 .default_value("auto")
745                 .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
746             .arg(Arg::with_name("verbose")
747                 .short("v")
748                 .long("verbose")
749                 .help("Print additional statistical information."))
750             .arg(Arg::with_name("noplot")
751                 .short("n")
752                 .long("noplot")
753                 .help("Disable plot and HTML generation."))
754             .arg(Arg::with_name("save-baseline")
755                 .short("s")
756                 .long("save-baseline")
757                 .default_value("base")
758                 .help("Save results under a named baseline."))
759             .arg(Arg::with_name("baseline")
760                 .short("b")
761                 .long("baseline")
762                 .takes_value(true)
763                 .conflicts_with("save-baseline")
764                 .help("Compare to a named baseline."))
765             .arg(Arg::with_name("list")
766                 .long("list")
767                 .help("List all benchmarks")
768                 .conflicts_with_all(&["test", "profile-time"]))
769             .arg(Arg::with_name("profile-time")
770                 .long("profile-time")
771                 .takes_value(true)
772                 .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
773                 .conflicts_with_all(&["test", "list"]))
774             .arg(Arg::with_name("load-baseline")
775                  .long("load-baseline")
776                  .takes_value(true)
777                  .conflicts_with("profile-time")
778                  .requires("baseline")
779                  .help("Load a previous baseline instead of sampling new data."))
780             .arg(Arg::with_name("sample-size")
781                 .long("sample-size")
782                 .takes_value(true)
783                 .help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
784             .arg(Arg::with_name("warm-up-time")
785                 .long("warm-up-time")
786                 .takes_value(true)
787                 .help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
788             .arg(Arg::with_name("measurement-time")
789                 .long("measurement-time")
790                 .takes_value(true)
791                 .help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
792             .arg(Arg::with_name("nresamples")
793                 .long("nresamples")
794                 .takes_value(true)
795                 .help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
796             .arg(Arg::with_name("noise-threshold")
797                 .long("noise-threshold")
798                 .takes_value(true)
799                 .help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
800             .arg(Arg::with_name("confidence-level")
801                 .long("confidence-level")
802                 .takes_value(true)
803                 .help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
804             .arg(Arg::with_name("significance-level")
805                 .long("significance-level")
806                 .takes_value(true)
807                 .help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
808             .arg(Arg::with_name("test")
809                 .hidden(true)
810                 .long("test")
811                 .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
812                 .conflicts_with_all(&["list", "profile-time"]))
813             .arg(Arg::with_name("bench")
814                 .hidden(true)
815                 .long("bench"))
816             .arg(Arg::with_name("plotting-backend")
817                  .long("plotting-backend")
818                  .takes_value(true)
819                  .possible_values(&["gnuplot", "plotters"])
820                  .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
821             .arg(Arg::with_name("output-format")
822                 .long("output-format")
823                 .takes_value(true)
824                 .possible_values(&["criterion", "bencher"])
825                 .default_value("criterion")
826                 .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
827             .arg(Arg::with_name("nocapture")
828                 .long("nocapture")
829                 .hidden(true)
830                 .help("Ignored, but added for compatibility with libtest."))
831             .arg(Arg::with_name("version")
832                 .hidden(true)
833                 .short("V")
834                 .long("version"))
835             .after_help("
836 This executable is a Criterion.rs benchmark.
837 See https://github.com/bheisler/criterion.rs for more details.
838 
839 To enable debug output, define the environment variable CRITERION_DEBUG.
840 Criterion.rs will output more debug information and will save the gnuplot
841 scripts alongside the generated plots.
842 
843 To test that the benchmarks work, run `cargo test --benches`
844 
845 NOTE: If you see an 'unrecognized option' error using any of the options above, see:
846 https://bheisler.github.io/criterion.rs/book/faq.html
847 ")
848             .get_matches();
849 
850         if self.connection.is_some() {
851             if let Some(color) = matches.value_of("color") {
852                 if color != "auto" {
853                     println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
854                 }
855             }
856             if matches.is_present("verbose") {
857                 println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
858             }
859             if matches.is_present("noplot") {
860                 println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
861             }
862             if let Some(backend) = matches.value_of("plotting-backend") {
863                 println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
864             }
865             if let Some(format) = matches.value_of("output-format") {
866                 if format != "criterion" {
867                     println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
868                 }
869             }
870 
871             if matches.is_present("baseline")
872                 || matches
873                     .value_of("save-baseline")
874                     .map(|base| base != "base")
875                     .unwrap_or(false)
876                 || matches.is_present("load-baseline")
877             {
878                 println!("Error: baselines are not supported when running with cargo-criterion.");
879                 std::process::exit(1);
880             }
881         }
882 
883         let bench = matches.is_present("bench");
884         let test = matches.is_present("test");
885         let test_mode = match (bench, test) {
886             (true, true) => true,   // cargo bench -- --test should run tests
887             (true, false) => false, // cargo bench should run benchmarks
888             (false, _) => true,     // cargo test --benches should run tests
889         };
890 
891         self.mode = if test_mode {
892             Mode::Test
893         } else if matches.is_present("list") {
894             Mode::List
895         } else if matches.is_present("profile-time") {
896             let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
897                 println!("{}", e);
898                 std::process::exit(1)
899             });
900 
901             if num_seconds < 1 {
902                 println!("Profile time must be at least one second.");
903                 std::process::exit(1);
904             }
905 
906             Mode::Profile(Duration::from_secs(num_seconds))
907         } else {
908             Mode::Benchmark
909         };
910 
911         // This is kind of a hack, but disable the connection to the runner if we're not benchmarking.
912         if !self.mode.is_benchmark() {
913             self.connection = None;
914         }
915 
916         if let Some(filter) = matches.value_of("FILTER") {
917             self = self.with_filter(filter);
918         }
919 
920         match matches.value_of("plotting-backend") {
921             // Use plotting_backend() here to re-use the panic behavior if Gnuplot is not available.
922             Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
923             Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
924             Some(val) => panic!("Unexpected plotting backend '{}'", val),
925             None => {}
926         }
927 
928         if matches.is_present("noplot") {
929             self = self.without_plots();
930         } else {
931             self = self.with_plots();
932         }
933 
934         if let Some(dir) = matches.value_of("save-baseline") {
935             self.baseline = Baseline::Save;
936             self.baseline_directory = dir.to_owned()
937         }
938         if let Some(dir) = matches.value_of("baseline") {
939             self.baseline = Baseline::Compare;
940             self.baseline_directory = dir.to_owned();
941         }
942 
943         if self.connection.is_some() {
944             // disable all reports when connected to cargo-criterion; it will do the reporting.
945             self.report.cli_enabled = false;
946             self.report.bencher_enabled = false;
947             self.report.csv_enabled = false;
948             self.report.html_enabled = false;
949         } else {
950             match matches.value_of("output-format") {
951                 Some("bencher") => {
952                     self.report.bencher_enabled = true;
953                     self.report.cli_enabled = false;
954                 }
955                 _ => {
956                     let verbose = matches.is_present("verbose");
957                     let stdout_isatty = atty::is(atty::Stream::Stdout);
958                     let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
959                     let enable_text_coloring;
960                     match matches.value_of("color") {
961                         Some("always") => {
962                             enable_text_coloring = true;
963                         }
964                         Some("never") => {
965                             enable_text_coloring = false;
966                             enable_text_overwrite = false;
967                         }
968                         _ => enable_text_coloring = stdout_isatty,
969                     };
970                     self.report.bencher_enabled = false;
971                     self.report.cli_enabled = true;
972                     self.report.cli =
973                         CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
974                 }
975             };
976         }
977 
978         if let Some(dir) = matches.value_of("load-baseline") {
979             self.load_baseline = Some(dir.to_owned());
980         }
981 
982         if matches.is_present("sample-size") {
983             let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
984                 println!("{}", e);
985                 std::process::exit(1)
986             });
987 
988             assert!(num_size >= 10);
989             self.config.sample_size = num_size;
990         }
991         if matches.is_present("warm-up-time") {
992             let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
993                 println!("{}", e);
994                 std::process::exit(1)
995             });
996 
997             let dur = std::time::Duration::new(num_seconds, 0);
998             assert!(dur.to_nanos() > 0);
999 
1000             self.config.warm_up_time = dur;
1001         }
1002         if matches.is_present("measurement-time") {
1003             let num_seconds =
1004                 value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
1005                     println!("{}", e);
1006                     std::process::exit(1)
1007                 });
1008 
1009             let dur = std::time::Duration::new(num_seconds, 0);
1010             assert!(dur.to_nanos() > 0);
1011 
1012             self.config.measurement_time = dur;
1013         }
1014         if matches.is_present("nresamples") {
1015             let num_resamples =
1016                 value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
1017                     println!("{}", e);
1018                     std::process::exit(1)
1019                 });
1020 
1021             assert!(num_resamples > 0);
1022 
1023             self.config.nresamples = num_resamples;
1024         }
1025         if matches.is_present("noise-threshold") {
1026             let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
1027                 .unwrap_or_else(|e| {
1028                     println!("{}", e);
1029                     std::process::exit(1)
1030                 });
1031 
1032             assert!(num_noise_threshold > 0.0);
1033 
1034             self.config.noise_threshold = num_noise_threshold;
1035         }
1036         if matches.is_present("confidence-level") {
1037             let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
1038                 .unwrap_or_else(|e| {
1039                     println!("{}", e);
1040                     std::process::exit(1)
1041                 });
1042 
1043             assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
1044 
1045             self.config.confidence_level = num_confidence_level;
1046         }
1047         if matches.is_present("significance-level") {
1048             let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
1049                 .unwrap_or_else(|e| {
1050                     println!("{}", e);
1051                     std::process::exit(1)
1052                 });
1053 
1054             assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
1055 
1056             self.config.significance_level = num_significance_level;
1057         }
1058 
1059         self
1060     }
1061 
1062     fn filter_matches(&self, id: &str) -> bool {
1063         match self.filter {
1064             Some(ref regex) => regex.is_match(id),
1065             None => true,
1066         }
1067     }
1068 
1069     /// Return a benchmark group. All benchmarks performed using a benchmark group will be
1070     /// grouped together in the final report.
1071     ///
1072     /// # Examples:
1073     ///
1074     /// ```rust
1075     /// #[macro_use] extern crate criterion;
1076     /// use self::criterion::*;
1077     ///
1078     /// fn bench_simple(c: &mut Criterion) {
1079     ///     let mut group = c.benchmark_group("My Group");
1080     ///
1081     ///     // Now we can perform benchmarks with this group
1082     ///     group.bench_function("Bench 1", |b| b.iter(|| 1 ));
1083     ///     group.bench_function("Bench 2", |b| b.iter(|| 2 ));
1084     ///
1085     ///     group.finish();
1086     /// }
1087     /// criterion_group!(benches, bench_simple);
1088     /// criterion_main!(benches);
1089     /// ```
1090     /// # Panics:
1091     /// Panics if the group name is empty
1092     pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
1093         let group_name = group_name.into();
1094         if group_name.is_empty() {
1095             panic!("Group name must not be empty.");
1096         }
1097 
1098         if let Some(conn) = &self.connection {
1099             conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
1100                 .unwrap();
1101         }
1102 
1103         BenchmarkGroup::new(self, group_name)
1104     }
1105 }
1106 impl<M> Criterion<M>
1107 where
1108     M: Measurement + 'static,
1109 {
1110     /// Benchmarks a function. For comparing multiple functions, see `benchmark_group`.
1111     ///
1112     /// # Example
1113     ///
1114     /// ```rust
1115     /// #[macro_use] extern crate criterion;
1116     /// use self::criterion::*;
1117     ///
1118     /// fn bench(c: &mut Criterion) {
1119     ///     // Setup (construct data, allocate memory, etc)
1120     ///     c.bench_function(
1121     ///         "function_name",
1122     ///         |b| b.iter(|| {
1123     ///             // Code to benchmark goes here
1124     ///         }),
1125     ///     );
1126     /// }
1127     ///
1128     /// criterion_group!(benches, bench);
1129     /// criterion_main!(benches);
1130     /// ```
1131     pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
1132     where
1133         F: FnMut(&mut Bencher<'_, M>),
1134     {
1135         self.benchmark_group(id)
1136             .bench_function(BenchmarkId::no_function(), f);
1137         self
1138     }
1139 
1140     /// Benchmarks a function with an input. For comparing multiple functions or multiple inputs,
1141     /// see `benchmark_group`.
1142     ///
1143     /// # Example
1144     ///
1145     /// ```rust
1146     /// #[macro_use] extern crate criterion;
1147     /// use self::criterion::*;
1148     ///
1149     /// fn bench(c: &mut Criterion) {
1150     ///     // Setup (construct data, allocate memory, etc)
1151     ///     let input = 5u64;
1152     ///     c.bench_with_input(
1153     ///         BenchmarkId::new("function_name", input), &input,
1154     ///         |b, i| b.iter(|| {
1155     ///             // Code to benchmark using input `i` goes here
1156     ///         }),
1157     ///     );
1158     /// }
1159     ///
1160     /// criterion_group!(benches, bench);
1161     /// criterion_main!(benches);
1162     /// ```
1163     pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
1164     where
1165         F: FnMut(&mut Bencher<'_, M>, &I),
1166     {
1167         // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with no function
1168         // name. That's intended for use with BenchmarkGroups where the function name isn't necessary,
1169         // but here it is.
1170         let group_name = id.function_name.expect(
1171             "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
1172                  Consider using a BenchmarkGroup or BenchmarkId::new instead.",
1173         );
1174         // Guaranteed safe because external callers can't create benchmark IDs without a parameter
1175         let parameter = id.parameter.unwrap();
1176         self.benchmark_group(group_name).bench_with_input(
1177             BenchmarkId::no_function_with_input(parameter),
1178             input,
1179             f,
1180         );
1181         self
1182     }
1183 
1184     /// Benchmarks a function under various inputs
1185     ///
1186     /// This is a convenience method to execute several related benchmarks. Each benchmark will
1187     /// receive the id: `${id}/${input}`.
1188     ///
1189     /// # Example
1190     ///
1191     /// ```rust
1192     /// # #[macro_use] extern crate criterion;
1193     /// # use self::criterion::*;
1194     ///
1195     /// fn bench(c: &mut Criterion) {
1196     ///     c.bench_function_over_inputs("from_elem",
1197     ///         |b: &mut Bencher, size: &usize| {
1198     ///             b.iter(|| vec![0u8; *size]);
1199     ///         },
1200     ///         vec![1024, 2048, 4096]
1201     ///     );
1202     /// }
1203     ///
1204     /// criterion_group!(benches, bench);
1205     /// criterion_main!(benches);
1206     /// ```
1207     #[doc(hidden)]
1208     #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
1209     #[allow(deprecated)]
1210     pub fn bench_function_over_inputs<I, F>(
1211         &mut self,
1212         id: &str,
1213         f: F,
1214         inputs: I,
1215     ) -> &mut Criterion<M>
1216     where
1217         I: IntoIterator,
1218         I::Item: fmt::Debug + 'static,
1219         F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
1220     {
1221         self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
1222     }
1223 
1224     /// Benchmarks multiple functions
1225     ///
1226     /// All functions get the same input and are compared with the other implementations.
1227     /// It works similarly to `bench_function`, but with multiple functions.
1228     ///
1229     /// # Example
1230     ///
1231     /// ``` rust
1232     /// # #[macro_use] extern crate criterion;
1233     /// # use self::criterion::*;
1234     /// # fn seq_fib(i: &u32) {}
1235     /// # fn par_fib(i: &u32) {}
1236     ///
1237     /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
1238     ///     b.iter(|| {
1239     ///         seq_fib(i);
1240     ///     });
1241     /// }
1242     ///
1243     /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
1244     ///     b.iter(|| {
1245     ///         par_fib(i);
1246     ///     });
1247     /// }
1248     ///
1249     /// fn bench(c: &mut Criterion) {
1250     ///     let sequential_fib = Fun::new("Sequential", bench_seq_fib);
1251     ///     let parallel_fib = Fun::new("Parallel", bench_par_fib);
1252     ///     let funs = vec![sequential_fib, parallel_fib];
1253     ///
1254     ///     c.bench_functions("Fibonacci", funs, 14);
1255     /// }
1256     ///
1257     /// criterion_group!(benches, bench);
1258     /// criterion_main!(benches);
1259     /// ```
1260     #[doc(hidden)]
1261     #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
1262     #[allow(deprecated)]
1263     pub fn bench_functions<I>(
1264         &mut self,
1265         id: &str,
1266         funs: Vec<Fun<I, M>>,
1267         input: I,
1268     ) -> &mut Criterion<M>
1269     where
1270         I: fmt::Debug + 'static,
1271     {
1272         let benchmark = ParameterizedBenchmark::with_functions(
1273             funs.into_iter().map(|fun| fun.f).collect(),
1274             vec![input],
1275         );
1276 
1277         self.bench(id, benchmark)
1278     }
1279 
1280     /// Executes the given benchmark. Use this variant to execute benchmarks
1281     /// with complex configuration. This can be used to compare multiple
1282     /// functions, execute benchmarks with custom configuration settings and
1283     /// more. See the Benchmark and ParameterizedBenchmark structs for more
1284     /// information.
1285     ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use criterion::*;
    /// # fn routine_1() {}
    /// # fn routine_2() {}
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     c.bench(
    ///         "routines",
    ///         Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
    ///             .with_function("routine_2", |b| b.iter(|| routine_2()))
    ///             .sample_size(50)
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    #[doc(hidden)]
    #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
    pub fn bench<B: BenchmarkDefinition<M>>(
        &mut self,
        group_id: &str,
        benchmark: B,
    ) -> &mut Criterion<M> {
        benchmark.run(group_id, self);
        self
    }
}

trait DurationExt {
    fn to_nanos(&self) -> u64;
}

const NANOS_PER_SEC: u64 = 1_000_000_000;

impl DurationExt for Duration {
    fn to_nanos(&self) -> u64 {
        self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
    }
}

/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If a throughput setting is configured for a benchmark, the estimated throughput is
/// reported in addition to the time per iteration.
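///
/// # Example
///
/// A minimal sketch of configuring a per-group throughput, assuming the
/// `BenchmarkGroup::throughput` method; adjust the group name and input size to your own
/// benchmark:
///
/// ```rust
/// use self::criterion::{Criterion, Throughput};
///
/// // Using Criterion::default() for simplicity; normally you'd use the macros.
/// let mut criterion = Criterion::default();
/// let input = vec![0u8; 4096];
/// let mut group = criterion.benchmark_group("throughput-example");
/// // Report bytes/second based on the number of bytes processed per iteration.
/// group.throughput(Throughput::Bytes(input.len() as u64));
/// // Register benchmarks on the group here.
/// ```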
// TODO: Remove serialize/deserialize from the public API.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum Throughput {
    /// Measure throughput in terms of bytes/second. The value should be the number of bytes
    /// processed by one iteration of the benchmarked code. Typically, this would be the length of
    /// an input string or `&[u8]`.
    Bytes(u64),

    /// Measure throughput in terms of elements/second. The value should be the number of elements
    /// processed by one iteration of the benchmarked code. Typically, this would be the size of a
    /// collection, but could also be the number of lines of input text or the number of values to
    /// parse.
    Elements(u64),
}

/// Axis scaling type
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly
    Linear,

    /// Axes scale logarithmically
    Logarithmic,
}

/// Contains the configuration options for the plots generated by a particular benchmark
/// or benchmark group.
///
/// ```rust
/// use self::criterion::{AxisScale, Criterion, PlotConfiguration};
///
/// let plot_config = PlotConfiguration::default()
///     .summary_scale(AxisScale::Logarithmic);
///
/// // Using Criterion::default() for simplicity; normally you'd use the macros.
/// let mut criterion = Criterion::default();
/// let mut benchmark_group = criterion.benchmark_group("Group name");
/// benchmark_group.plot_config(plot_config);
/// // Use benchmark group
/// ```
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl Default for PlotConfiguration {
    fn default() -> PlotConfiguration {
        PlotConfiguration {
            summary_scale: AxisScale::Linear,
        }
    }
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
    /// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
    /// Defaults to linear.
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}

/// This enum allows the user to control how Criterion.rs chooses the iteration count when sampling.
/// The default is Auto, which will choose a method automatically based on the iteration time during
/// the warm-up phase.
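///
/// # Example
///
/// A minimal sketch of selecting flat sampling for a group of long-running benchmarks,
/// assuming the `BenchmarkGroup::sampling_mode` method; the group name is arbitrary:
///
/// ```rust
/// use self::criterion::{Criterion, SamplingMode};
///
/// // Using Criterion::default() for simplicity; normally you'd use the macros.
/// let mut criterion = Criterion::default();
/// let mut group = criterion.benchmark_group("long-running");
/// // Flat sampling keeps the iteration count constant across samples, which keeps total
/// // runtime down for slow routines at some cost in statistical precision.
/// group.sampling_mode(SamplingMode::Flat);
/// // Register benchmarks on the group here.
/// ```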
#[derive(Debug, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling method automatically. This is the default, and is
    /// recommended for most users and most benchmarks.
    Auto,

    /// Scale the iteration count in each sample linearly. This is suitable for most benchmarks,
    /// but it tends to require many iterations, which can make it very slow for benchmarks with
    /// long-running routines.
    Linear,

    /// Keep the iteration count the same for all samples. This is not recommended, as it affects
    /// the statistics that Criterion.rs can compute. However, it requires fewer iterations than
    /// the Linear method and therefore is more suitable for very long-running benchmarks where
    /// benchmark execution time is more of a problem and statistical precision is less important.
    Flat,
}
impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate execution time with linear sampling
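                // With linear sampling, sample i runs i*d iterations, so all samples together run
                // d * n(n+1)/2 iterations. If even that estimate overshoots the target time by
                // more than a factor of two, fall back to flat sampling, which needs fewer
                // iterations overall.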
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

/// Enum representing the sampling mode once `Auto` has been resolved to a concrete choice.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}
impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.to_nanos();
                // Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
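                // d is the iteration count of the first (smallest) sample; sample i then runs
                // i*d iterations, so sample times grow linearly with the iteration count and the
                // per-iteration time can be estimated from the slope.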
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        println!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        println!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1) as u64).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.to_nanos() as f64;
                let time_per_sample = m_ns / (n as f64);
                // This is pretty simplistic; we could do something smarter to fit into the allotted time.
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        println!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        println!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // The total execution time satisfies n(n+1)/2 * d * met = target_time. With d = 1 this
        // gives n(n+1) = (2*target_time)/met, i.e. n^2 + n - (2*target_time)/met = 0.
        // Solving with the quadratic formula (a = 1, b = 1, constant term -(2*target_time)/met)
        // gives sample_size = (-1 + sqrt(1 + (8*target_time)/met)) / 2; the negative root is
        // discarded. Experimentation shows that this tends to produce roughly twice the desired
        // execution time (probably because of the ceil used to calculate d), so the 1 under the
        // square root is dropped and c = target_time/met is used in place of (2*target_time)/met,
        // giving sample_size = (-1 + sqrt(4*c)) / 2.
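        // Worked example (hypothetical numbers): with target_time = 5e9 ns and met = 1e6 ns,
        // c = 5000, so sample_size = (-1 + sqrt(20000)) / 2, roughly 70.2, which truncates to 70;
        // 70 is already a multiple of 10 and at least 10, so 70 samples are recommended.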
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision
        let sample_size = (sample_size / 10) * 10;

        // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision
        let sample_size = (sample_size / 10) * 10;

        // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

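/// Sampling mode, iteration counts, and measured times for one benchmark run, saved so that a
/// later run can be compared against it.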
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Custom-test-framework runner. Should not be called directly.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}

/// Print a warning informing users about upcoming changes to features
#[cfg(not(feature = "html_reports"))]
#[doc(hidden)]
pub fn __warn_about_html_reports_feature() {
    if CARGO_CRITERION_CONNECTION.is_none() {
        println!(
            "WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
        );
        println!(
            "This feature is being moved to cargo-criterion \
            (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
            version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
            enable the 'html_reports' feature in your Cargo.toml."
        );
        println!();
    }
}

/// Print a warning informing users about upcoming changes to features
#[cfg(feature = "html_reports")]
#[doc(hidden)]
pub fn __warn_about_html_reports_feature() {
    // They have the feature enabled, so they're ready for the update.
}

/// Print a warning informing users about upcoming changes to features
#[cfg(not(feature = "cargo_bench_support"))]
#[doc(hidden)]
pub fn __warn_about_cargo_bench_support_feature() {
    if CARGO_CRITERION_CONNECTION.is_none() {
        println!(
            "WARNING: In Criterion.rs 0.4.0, running Criterion.rs benchmarks outside of cargo-criterion will become an optional feature (still enabled by default)."
        );
        println!(
            "The statistical analysis and reporting is being moved to cargo-criterion \
            (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
            version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
            enable the 'cargo_bench_support' feature in your Cargo.toml."
        );
        println!();
    }
}

/// Print a warning informing users about upcoming changes to features
#[cfg(feature = "cargo_bench_support")]
#[doc(hidden)]
pub fn __warn_about_cargo_bench_support_feature() {
    // They have the feature enabled, so they're ready for the update.
}