use analysis;
use program::CommandFactory;
use report::{BenchmarkId, ReportContext};
use routine::{Function, Routine};
use std::cell::RefCell;
use std::collections::HashSet;
use std::fmt::Debug;
use std::marker::Sized;
use std::process::Command;
use std::time::Duration;
use {Bencher, Criterion, DurationExt, PlotConfiguration, Throughput};

/// Struct containing all of the configuration options for a benchmark.
pub struct BenchmarkConfig {
    pub confidence_level: f64,
    pub measurement_time: Duration,
    pub noise_threshold: f64,
    pub nresamples: usize,
    pub sample_size: usize,
    pub significance_level: f64,
    pub warm_up_time: Duration,
}

/// Struct representing a partially-complete per-benchmark configuration.
struct PartialBenchmarkConfig {
    confidence_level: Option<f64>,
    measurement_time: Option<Duration>,
    noise_threshold: Option<f64>,
    nresamples: Option<usize>,
    sample_size: Option<usize>,
    significance_level: Option<f64>,
    warm_up_time: Option<Duration>,
    plot_config: PlotConfiguration,
}

impl Default for PartialBenchmarkConfig {
    fn default() -> Self {
        PartialBenchmarkConfig {
            confidence_level: None,
            measurement_time: None,
            noise_threshold: None,
            nresamples: None,
            sample_size: None,
            significance_level: None,
            warm_up_time: None,
            plot_config: PlotConfiguration::default(),
        }
    }
}

impl PartialBenchmarkConfig {
    fn to_complete(&self, defaults: &BenchmarkConfig) -> BenchmarkConfig {
        BenchmarkConfig {
            confidence_level: self.confidence_level.unwrap_or(defaults.confidence_level),
            measurement_time: self.measurement_time.unwrap_or(defaults.measurement_time),
            noise_threshold: self.noise_threshold.unwrap_or(defaults.noise_threshold),
            nresamples: self.nresamples.unwrap_or(defaults.nresamples),
            sample_size: self.sample_size.unwrap_or(defaults.sample_size),
            significance_level: self
                .significance_level
                .unwrap_or(defaults.significance_level),
            warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
        }
    }
}

pub struct NamedRoutine<T> {
    pub id: String,
    pub f: Box<RefCell<Routine<T>>>,
}

/// Structure representing a benchmark (or group of benchmarks)
/// which takes one parameter.
pub struct ParameterizedBenchmark<T: Debug> {
    config: PartialBenchmarkConfig,
    values: Vec<T>,
    routines: Vec<NamedRoutine<T>>,
    throughput: Option<Box<Fn(&T) -> Throughput>>,
}

/// Structure representing a benchmark (or group of benchmarks)
/// which takes no parameters.
pub struct Benchmark {
    config: PartialBenchmarkConfig,
    routines: Vec<NamedRoutine<()>>,
    throughput: Option<Throughput>,
}

/// Common trait for `Benchmark` and `ParameterizedBenchmark`. Not intended to be
/// used outside of Criterion.rs.
pub trait BenchmarkDefinition: Sized {
    #[doc(hidden)]
    fn run(self, group_id: &str, c: &Criterion);
}

macro_rules! benchmark_config {
    ($type:tt) => {
        /// Changes the size of the sample for this benchmark
        ///
        /// A bigger sample should yield more accurate results if paired with a sufficiently large
        /// measurement time.
        ///
        /// Sample size must be at least 2.
        ///
        /// # Panics
        ///
        /// Panics if set to zero or one.
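        ///
        /// # Example
        ///
        /// A minimal usage sketch; the closure is only a stand-in workload.
        ///
        /// ```rust
        /// # use criterion::Benchmark;
        /// Benchmark::new("summation", |b| b.iter(|| (0..1000u64).sum::<u64>()))
        ///     .sample_size(60);
        /// ```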
        pub fn sample_size(mut self, n: usize) -> Self {
            assert!(n >= 2);
            if n < 10 {
                println!("Warning: Sample sizes < 10 will be disallowed in Criterion.rs 0.3.0.");
            }

            self.config.sample_size = Some(n);
            self
        }

        /// Changes the warm up time for this benchmark
        ///
        /// # Panics
        ///
        /// Panics if the input duration is zero
        pub fn warm_up_time(mut self, dur: Duration) -> Self {
            assert!(dur.to_nanos() > 0);

            self.config.warm_up_time = Some(dur);
            self
        }

        /// Changes the target measurement time for this benchmark. Criterion will attempt
        /// to spend approximately this amount of time measuring the benchmark.
        /// With a longer time, the measurement will become more resilient to transitory peak loads
        /// caused by external programs.
        ///
        /// # Panics
        ///
        /// Panics if the input duration is zero
        pub fn measurement_time(mut self, dur: Duration) -> Self {
            assert!(dur.to_nanos() > 0);

            self.config.measurement_time = Some(dur);
            self
        }

        /// Changes the number of resamples for this benchmark
        ///
        /// Number of resamples to use for the
        /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
        ///
        /// A larger number of resamples reduces the random sampling errors, which are inherent to the
        /// bootstrap method, but also increases the analysis time.
        ///
        /// # Panics
        ///
        /// Panics if the number of resamples is set to zero
        pub fn nresamples(mut self, n: usize) -> Self {
            assert!(n > 0);

            self.config.nresamples = Some(n);
            self
        }

        /// Changes the noise threshold for this benchmark
        ///
        /// This threshold is used to decide if an increase of `X%` in the execution time is considered
        /// significant or should be flagged as noise
        ///
        /// *Note:* A value of `0.02` is equivalent to `2%`
        ///
        /// # Panics
        ///
        /// Panics if the threshold is set to a negative value
        pub fn noise_threshold(mut self, threshold: f64) -> Self {
            assert!(threshold >= 0.0);

            self.config.noise_threshold = Some(threshold);
            self
        }

        /// Changes the confidence level for this benchmark
        ///
        /// The confidence level is used to calculate the
        /// [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) of the estimated
        /// statistics
        ///
        /// # Panics
        ///
        /// Panics if the confidence level is set to a value outside the `(0, 1)` range
        pub fn confidence_level(mut self, cl: f64) -> Self {
            assert!(cl > 0.0 && cl < 1.0);

            self.config.confidence_level = Some(cl);
            self
        }

        /// Changes the [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
        /// for this benchmark
        ///
        /// The significance level is used for
        /// [hypothesis testing](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing)
        ///
        /// # Panics
        ///
        /// Panics if the significance level is set to a value outside the `(0, 1)` range
        pub fn significance_level(mut self, sl: f64) -> Self {
            assert!(sl > 0.0 && sl < 1.0);

            self.config.significance_level = Some(sl);
            self
        }

        /// Changes the plot configuration for this benchmark.
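        ///
        /// # Example
        ///
        /// A brief sketch; it assumes the `AxisScale` type and the `summary_scale` builder
        /// method are available at the crate root alongside `PlotConfiguration`.
        ///
        /// ```rust
        /// # use criterion::{AxisScale, Benchmark, PlotConfiguration};
        /// Benchmark::new("summation", |b| b.iter(|| (0..1000u64).sum::<u64>()))
        ///     .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
        /// ```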
        pub fn plot_config(mut self, new_config: PlotConfiguration) -> Self {
            self.config.plot_config = new_config;
            self
        }
    }
}

impl Benchmark {
    benchmark_config!(Benchmark);

    /// Create a new benchmark group and add the given function to it.
    ///
    /// # Example
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use criterion::*;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // One-time setup goes here
    ///     c.bench(
    ///         "my_group",
    ///         Benchmark::new("my_function", |b| b.iter(|| {
    ///             // Code to benchmark goes here
    ///         })),
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn new<S, F>(id: S, f: F) -> Benchmark
    where
        S: Into<String>,
        F: FnMut(&mut Bencher) + 'static,
    {
        Benchmark {
            config: PartialBenchmarkConfig::default(),
            routines: vec![],
            throughput: None,
        }
        .with_function(id, f)
    }

    /// Create a new benchmark group and add the given program to it.
    ///
    /// The external program must:
    ///
    /// * Read the number of iterations from stdin
    /// * Execute the routine to benchmark that many times
    /// * Print the elapsed time (in nanoseconds) to stdout
    ///
    /// ```rust,no_run
    /// # use std::io::{self, BufRead};
    /// # use std::time::Instant;
    /// # use std::time::Duration;
    /// # trait DurationExt { fn to_nanos(&self) -> u64 { 0 } }
    /// # impl DurationExt for Duration {}
    /// // Example of an external program that implements this protocol
    ///
    /// fn main() {
    ///     let stdin = io::stdin();
    ///     let ref mut stdin = stdin.lock();
    ///
    ///     // For each line in stdin
    ///     for line in stdin.lines() {
    ///         // Parse line as the number of iterations
    ///         let iters: u64 = line.unwrap().trim().parse().unwrap();
    ///
    ///         // Setup
    ///
    ///         // Benchmark
    ///         let start = Instant::now();
    ///         // Execute the routine "iters" times
    ///         for _ in 0..iters {
    ///             // Code to benchmark goes here
    ///         }
    ///         let elapsed = start.elapsed();
    ///
    ///         // Teardown
    ///
    ///         // Report elapsed time in nanoseconds to stdout
    ///         println!("{}", elapsed.to_nanos());
    ///     }
    /// }
    /// ```
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    #[allow(deprecated)]
    pub fn new_external<S>(id: S, program: Command) -> Benchmark
    where
        S: Into<String>,
    {
        Benchmark {
            config: PartialBenchmarkConfig::default(),
            routines: vec![],
            throughput: None,
        }
        .with_program(id, program)
    }

    /// Add a function to the benchmark group.
    ///
    /// # Example:
    /// ```
    /// # use criterion::Benchmark;
    /// Benchmark::new("return 10", |b| b.iter(|| 10))
    ///     .with_function("return 20", |b| b.iter(|| 20));
    /// ```
    pub fn with_function<S, F>(mut self, id: S, mut f: F) -> Benchmark
    where
        S: Into<String>,
        F: FnMut(&mut Bencher) + 'static,
    {
        let routine = NamedRoutine {
            id: id.into(),
            f: Box::new(RefCell::new(Function::new(move |b, _| f(b)))),
        };
        self.routines.push(routine);
        self
    }

    /// Add an external program to the benchmark group.
    ///
    /// # Example:
    /// ```
    /// # use criterion::Benchmark;
    /// # use std::process::Command;
    /// Benchmark::new("internal", |b| b.iter(|| 10))
    ///     .with_program("external", Command::new("my_external_benchmark"));
    /// ```
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    pub fn with_program<S>(mut self, id: S, program: Command) -> Benchmark
    where
        S: Into<String>,
    {
        let routine = NamedRoutine {
            id: id.into(),
            f: Box::new(RefCell::new(program)),
        };
        self.routines.push(routine);
        self
    }

    /// Set the input size for this benchmark group. Used for reporting the
    /// throughput.
    ///
    /// ```
    /// # use criterion::{Benchmark, Throughput};
    /// # use std::process::Command;
    /// Benchmark::new("strlen", |b| b.iter(|| "foo".len()))
    ///     .throughput(Throughput::Bytes(3));
    /// ```
    pub fn throughput(mut self, throughput: Throughput) -> Benchmark {
        self.throughput = Some(throughput);
        self
    }
}

impl BenchmarkDefinition for Benchmark {
    fn run(self, group_id: &str, c: &Criterion) {
        let report_context = ReportContext {
            output_directory: c.output_directory.clone(),
            plotting: c.plotting,
            plot_config: self.config.plot_config.clone(),
            test_mode: c.test_mode,
        };

        let config = self.config.to_complete(&c.config);
        let num_routines = self.routines.len();

        let mut all_ids = vec![];
        let mut any_matched = false;
        let mut all_directories = HashSet::new();
        let mut all_titles = HashSet::new();

        for routine in self.routines {
            let function_id = if num_routines == 1 && group_id == routine.id {
                None
            } else {
                Some(routine.id)
            };

            let mut id = BenchmarkId::new(
                group_id.to_owned(),
                function_id,
                None,
                self.throughput.clone(),
            );

            id.ensure_directory_name_unique(&all_directories);
            all_directories.insert(id.as_directory_name().to_owned());
            id.ensure_title_unique(&all_titles);
            all_titles.insert(id.as_title().to_owned());

            if c.filter_matches(id.id()) {
                any_matched = true;
                analysis::common(
                    &id,
                    &mut *routine.f.borrow_mut(),
                    &config,
                    c,
                    &report_context,
                    &(),
                    self.throughput.clone(),
                );
            }

            all_ids.push(id);
        }

        if all_ids.len() > 1 && any_matched && c.profile_time.is_none() && !c.test_mode {
            c.report.summarize(&report_context, &all_ids);
        }
        if any_matched {
            println!();
        }
    }
}

impl<T> ParameterizedBenchmark<T>
where
    T: Debug + 'static,
{
    benchmark_config!(ParameterizedBenchmark);

    /// Create a new parameterized benchmark group and add the given function
    /// to it.
    /// The function under test must follow the setup - bench - teardown pattern.
    ///
    /// # Example
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use criterion::*;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let parameters = vec![1u64, 2u64, 3u64];
    ///
    ///     // One-time setup goes here
    ///     c.bench(
    ///         "my_group",
    ///         ParameterizedBenchmark::new(
    ///             "my_function",
    ///             |b, param| b.iter(|| {
    ///                 // Code to benchmark using param goes here
    ///             }),
    ///             parameters
    ///         )
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn new<S, F, I>(id: S, f: F, parameters: I) -> ParameterizedBenchmark<T>
    where
        S: Into<String>,
        F: FnMut(&mut Bencher, &T) + 'static,
        I: IntoIterator<Item = T>,
    {
        ParameterizedBenchmark {
            config: PartialBenchmarkConfig::default(),
            values: parameters.into_iter().collect(),
            routines: vec![],
            throughput: None,
        }
        .with_function(id, f)
    }

    /// Create a new parameterized benchmark group and add the given program to it.
    /// The program under test must implement the following protocol:
    ///
    /// * Read the number of iterations from stdin
    /// * Execute the routine to benchmark that many times
    /// * Print the elapsed time (in nanoseconds) to stdout
    ///
    /// You can pass the argument to the program in any way you choose.
    ///
    /// ```rust,no_run
    /// # use std::io::{self, BufRead};
    /// # use std::time::Instant;
    /// # use std::time::Duration;
    /// # trait DurationExt { fn to_nanos(&self) -> u64 { 0 } }
    /// # impl DurationExt for Duration {}
    /// # use std::env;
    /// // Example of an external program that implements this protocol
    ///
    /// fn main() {
    ///     let stdin = io::stdin();
    ///     let ref mut stdin = stdin.lock();
    ///
    ///     // You might opt to pass the parameter to the external command as
    ///     // an environment variable, command line argument, file on disk, etc.
    ///     let parameter = env::var("PARAMETER").unwrap();
    ///
    ///     // For each line in stdin
    ///     for line in stdin.lines() {
    ///         // Parse line as the number of iterations
    ///         let iters: u64 = line.unwrap().trim().parse().unwrap();
    ///
    ///         // Setup
    ///
    ///         // Benchmark
    ///         let start = Instant::now();
    ///         // Execute the routine "iters" times
    ///         for _ in 0..iters {
    ///             // Code to benchmark using the parameter goes here
    ///         }
    ///         let elapsed = start.elapsed();
    ///
    ///         // Teardown
    ///
    ///         // Report elapsed time in nanoseconds to stdout
    ///         println!("{}", elapsed.to_nanos());
    ///     }
    /// }
    /// ```
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    #[allow(deprecated)]
    pub fn new_external<S, F, I>(id: S, program: F, parameters: I) -> ParameterizedBenchmark<T>
    where
        S: Into<String>,
        F: FnMut(&T) -> Command + 'static,
        I: IntoIterator<Item = T>,
    {
        ParameterizedBenchmark {
            config: PartialBenchmarkConfig::default(),
            routines: vec![],
            values: parameters.into_iter().collect(),
            throughput: None,
        }
        .with_program(id, program)
    }

    pub(crate) fn with_functions(
        functions: Vec<NamedRoutine<T>>,
        parameters: Vec<T>,
    ) -> ParameterizedBenchmark<T> {
        ParameterizedBenchmark {
            config: PartialBenchmarkConfig::default(),
            values: parameters,
            routines: functions,
            throughput: None,
        }
    }

    /// Add a function to the benchmark group.
    ///
    /// # Example
    ///
    /// ```
    /// # use criterion::ParameterizedBenchmark;
    /// ParameterizedBenchmark::new("times 10", |b, i| b.iter(|| i * 10), vec![1, 2, 3])
    ///     .with_function("times 20", |b, i| b.iter(|| i * 20));
    /// ```
    pub fn with_function<S, F>(mut self, id: S, f: F) -> ParameterizedBenchmark<T>
    where
        S: Into<String>,
        F: FnMut(&mut Bencher, &T) + 'static,
    {
        let routine = NamedRoutine {
            id: id.into(),
            f: Box::new(RefCell::new(Function::new(f))),
        };
        self.routines.push(routine);
        self
    }

    /// Add an external program to the benchmark group.
    ///
    /// # Example
    ///
    /// ```
    /// # use criterion::ParameterizedBenchmark;
    /// # use std::process::Command;
    /// ParameterizedBenchmark::new("internal", |b, i| b.iter(|| i * 10), vec![1, 2, 3])
    ///     .with_program("external", |i| {
    ///         let mut command = Command::new("my_external_benchmark");
    ///         command.arg(format!("{:?}", i));
    ///         command
    ///     });
    /// ```
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    pub fn with_program<S, F>(mut self, id: S, program: F) -> ParameterizedBenchmark<T>
    where
        S: Into<String>,
        F: FnMut(&T) -> Command + 'static,
    {
        let factory = CommandFactory::new(program);
        let routine = NamedRoutine {
            id: id.into(),
            f: Box::new(RefCell::new(factory)),
        };
        self.routines.push(routine);
        self
    }

    /// Use the given function to calculate the input size for a given input.
    ///
    /// # Example
    ///
    /// ```
    /// # use criterion::{ParameterizedBenchmark, Throughput};
    /// # use std::process::Command;
    /// ParameterizedBenchmark::new("strlen", |b, s| b.iter(|| s.len()), vec!["foo", "lorem ipsum"])
    ///     .throughput(|s| Throughput::Bytes(s.len() as u32));
    /// ```
    pub fn throughput<F>(mut self, throughput: F) -> ParameterizedBenchmark<T>
    where
        F: Fn(&T) -> Throughput + 'static,
    {
        self.throughput = Some(Box::new(throughput));
        self
    }
}

impl<T> BenchmarkDefinition for ParameterizedBenchmark<T>
where
    T: Debug + 'static,
{
    fn run(self, group_id: &str, c: &Criterion) {
        let report_context = ReportContext {
            output_directory: c.output_directory.clone(),
            plotting: c.plotting,
            plot_config: self.config.plot_config.clone(),
            test_mode: c.test_mode,
        };

        let config = self.config.to_complete(&c.config);
        let num_parameters = self.values.len();
        let num_routines = self.routines.len();

        let mut all_ids = vec![];
        let mut any_matched = false;
        let mut all_directories = HashSet::new();
        let mut all_titles = HashSet::new();

        for routine in self.routines {
            for value in &self.values {
                let function_id = if num_routines == 1 && group_id == routine.id {
                    None
                } else {
                    Some(routine.id.clone())
                };

                let value_str = if num_parameters == 1 {
                    None
                } else {
                    Some(format!("{:?}", value))
                };

                let throughput = self.throughput.as_ref().map(|func| func(value));
                let mut id = BenchmarkId::new(
                    group_id.to_owned(),
                    function_id,
                    value_str,
                    throughput.clone(),
                );

                id.ensure_directory_name_unique(&all_directories);
                all_directories.insert(id.as_directory_name().to_owned());
                id.ensure_title_unique(&all_titles);
                all_titles.insert(id.as_title().to_owned());

                if c.filter_matches(id.id()) {
                    any_matched = true;

                    analysis::common(
                        &id,
                        &mut *routine.f.borrow_mut(),
                        &config,
                        c,
                        &report_context,
                        value,
                        throughput,
                    );
                }

                all_ids.push(id);
            }
        }

        if all_ids.len() > 1 && any_matched && c.profile_time.is_none() && !c.test_mode {
            c.report.summarize(&report_context, &all_ids);
        }
        if any_matched {
            println!();
        }
    }
}
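
// A brief usage sketch of the builders defined above and of the per-benchmark
// configuration methods generated by `benchmark_config!`. The concrete values and
// workloads are arbitrary; they only illustrate how the calls chain.
#[cfg(test)]
mod usage_sketches {
    use super::{Benchmark, ParameterizedBenchmark};
    use std::time::Duration;
    use Throughput;

    #[test]
    fn configure_benchmark() {
        // Override a few of the default settings for a single benchmark group.
        Benchmark::new("sum", |b| b.iter(|| (0..1000u64).sum::<u64>()))
            .sample_size(60)
            .warm_up_time(Duration::from_millis(500))
            .measurement_time(Duration::from_secs(3))
            .nresamples(50_000)
            .noise_threshold(0.02)
            .confidence_level(0.95)
            .significance_level(0.05);
    }

    #[test]
    fn parameterized_with_throughput() {
        // Compare two routines over the same inputs and derive a per-input throughput.
        ParameterizedBenchmark::new(
            "to_uppercase",
            |b, s| b.iter(|| s.to_uppercase()),
            vec!["foo", "lorem ipsum"],
        )
        .with_function("to_lowercase", |b, s| b.iter(|| s.to_lowercase()))
        .throughput(|s| Throughput::Bytes(s.len() as u32))
        .sample_size(30);
    }
}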