#![warn(clippy::doc_markdown, missing_docs)]
#![warn(bare_trait_objects)]
#![allow(clippy::just_underscores_and_digits, clippy::transmute_ptr_to_ptr)]
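//! A statistics-driven micro-benchmarking library.
//!
//! Criterion.rs collects timing samples for each benchmark, runs a statistical
//! analysis on them to detect performance changes, and can optionally render
//! plots and HTML reports.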

#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");

use {
    regex::Regex,
    serde::{Deserialize, Serialize},
};

#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;

use std::{
    cell::RefCell,
    collections::HashSet,
    env,
    io::{stdout, IsTerminal},
    net::TcpStream,
    path::{Path, PathBuf},
    process::Command,
    sync::{Mutex, MutexGuard},
    time::Duration,
};

use {
    criterion_plot::{Version, VersionError},
    std::sync::OnceLock,
};

#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::{
    benchmark::BenchmarkConfig,
    connection::{Connection, OutgoingMessage},
    html::Html,
    measurement::{Measurement, WallTime},
    plot::{Gnuplot, Plotter},
    profiler::{ExternalProfiler, Profiler},
    report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports},
};

#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::{
    bencher::Bencher,
    benchmark_group::{BenchmarkGroup, BenchmarkId},
};

fn gnuplot_version() -> &'static Result<Version, VersionError> {
    static GNUPLOT_VERSION: OnceLock<Result<Version, VersionError>> = OnceLock::new();

    GNUPLOT_VERSION.get_or_init(criterion_plot::version)
}

fn default_plotting_backend() -> &'static PlottingBackend {
    static DEFAULT_PLOTTING_BACKEND: OnceLock<PlottingBackend> = OnceLock::new();

    DEFAULT_PLOTTING_BACKEND.get_or_init(|| match gnuplot_version() {
        Ok(_) => PlottingBackend::Gnuplot,
        #[cfg(feature = "plotters")]
        Err(e) => {
            match e {
                VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
                e => eprintln!(
                    "Gnuplot not found or not usable, using plotters backend\n{}",
                    e
                ),
            };
            PlottingBackend::Plotters
        }
        #[cfg(not(feature = "plotters"))]
        Err(_) => PlottingBackend::None,
    })
}

fn cargo_criterion_connection() -> &'static Option<Mutex<Connection>> {
    static CARGO_CRITERION_CONNECTION: OnceLock<Option<Mutex<Connection>>> = OnceLock::new();

    CARGO_CRITERION_CONNECTION.get_or_init(|| match std::env::var("CARGO_CRITERION_PORT") {
        Ok(port_str) => {
            let port: u16 = port_str.parse().ok()?;
            let stream = TcpStream::connect(("localhost", port)).ok()?;
            Some(Mutex::new(Connection::new(stream).ok()?))
        }
        Err(_) => None,
    })
}

fn default_output_directory() -> &'static PathBuf {
    static DEFAULT_OUTPUT_DIRECTORY: OnceLock<PathBuf> = OnceLock::new();

    DEFAULT_OUTPUT_DIRECTORY.get_or_init(|| {
        // Set the Criterion.rs home to (in descending order of preference):
        // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
        // - $CARGO_TARGET_DIR/criterion
        // - the cargo target dir relative to the manifest
        // - target/criterion
        if let Some(value) = env::var_os("CRITERION_HOME") {
            PathBuf::from(value)
        } else if let Some(path) = cargo_target_directory() {
            path.join("criterion")
        } else {
            PathBuf::from("target/criterion")
        }
    })
}

fn debug_enabled() -> bool {
    static DEBUG_ENABLED: OnceLock<bool> = OnceLock::new();

    *DEBUG_ENABLED.get_or_init(|| std::env::var_os("CRITERION_DEBUG").is_some())
}

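/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// Deprecated: this is now just a forwarding wrapper; call
/// [`std::hint::black_box()`] directly instead. A minimal migration sketch:
///
/// ```
/// // Before: criterion::black_box(1 + 2);
/// // After:
/// let result = std::hint::black_box(1 + 2);
/// assert_eq!(result, 3);
/// ```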
#[deprecated(note = "use `std::hint::black_box()` instead")]
pub fn black_box<T>(dummy: T) -> T {
    std::hint::black_box(dummy)
}

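/// Argument to [`Bencher::iter_batched`] and [`Bencher::iter_batched_ref`] which
/// controls the batch size, i.e. how many iterations share one value produced by
/// the setup routine.
///
/// A minimal sketch of choosing a batch size (the setup and routine closures
/// here are placeholders):
///
/// ```no_run
/// use criterion::{BatchSize, Criterion};
///
/// let mut c = Criterion::default();
/// c.bench_function("sort", |b| {
///     b.iter_batched(
///         || vec![3u64, 1, 2],    // setup: build a fresh input for each batch
///         |mut data| data.sort(), // routine: consume the input
///         BatchSize::SmallInput,  // most benchmarks should use SmallInput
///     )
/// });
/// ```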
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// `SmallInput` indicates that the input to the benchmark routine (the value
    /// returned from the setup routine) is small enough that millions of values
    /// can safely be held in memory. Always prefer `SmallInput` unless the
    /// benchmark is using too much memory.
    SmallInput,

    /// `LargeInput` indicates that the input to the benchmark routine is large
    /// enough that we can only afford to hold thousands of values in memory at
    /// once.
    LargeInput,

    /// `PerIteration` indicates that the input is extremely large or holds a
    /// limited external resource, so only one value may be alive at a time. This
    /// increases measurement overhead; avoid it where possible.
    PerIteration,

    /// `NumBatches` divides the iterations into the given number of batches.
    /// Mostly useful for testing; most benchmarks should use `SmallInput` or
    /// `LargeInput` instead.
    NumBatches(u64),

    /// `NumIterations` fixes the number of iterations per batch. Mostly useful
    /// for testing; most benchmarks should use `SmallInput` or `LargeInput`
    /// instead.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to the number of iterations per batch.
    ///
    /// We try to do a constant number of batches regardless of the number of
    /// iterations in this sample. If the measurement overhead is roughly constant
    /// regardless of the number of iterations, the later analysis will have an
    /// easier time separating the measurement overhead from the benchmark time.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}

/// Baseline describes how the `baseline_directory` is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// `CompareLenient` compares against a previously saved baseline. If a
    /// previous baseline does not exist, the benchmark is run as normal but no
    /// comparison is made.
    CompareLenient,
    /// `CompareStrict` compares against a previously saved baseline. If a
    /// previous baseline does not exist, a panic occurs.
    CompareStrict,
    /// `Save` writes the benchmark results to the baseline directory,
    /// overwriting any previous results.
    Save,
    /// `Discard` discards the benchmark results.
    Discard,
}

/// Enum used to select the plotting backend.
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Plotting backend which uses the external `gnuplot` command to render
    /// plots. This is the default if the `gnuplot` command is installed.
    Gnuplot,
    /// Plotting backend which uses the Rust `plotters` crate. This is the
    /// default if `gnuplot` is not installed.
    Plotters,
    /// Null plotting backend which outputs nothing.
    None,
}
impl PlottingBackend {
    fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
        match self {
            PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
            #[cfg(feature = "plotters")]
            PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
            #[cfg(not(feature = "plotters"))]
            PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
            PlottingBackend::None => None,
        }
    }
}

/// Enum representing the execution mode.
#[derive(Debug, Clone)]
pub(crate) enum Mode {
    /// Run benchmarks normally.
    Benchmark,
    /// List all benchmarks but do not run them.
    List(ListFormat),
    /// Run benchmarks once to verify that they work, but otherwise do not
    /// measure them.
    Test,
    /// Iterate benchmarks for a given length of time but do not analyze or
    /// report on them.
    Profile(Duration),
}
impl Mode {
    pub fn is_benchmark(&self) -> bool {
        matches!(self, Mode::Benchmark)
    }

    pub fn is_terse(&self) -> bool {
        matches!(self, Mode::List(ListFormat::Terse))
    }
}

/// Enum representing the list format.
#[derive(Debug, Default, Clone)]
pub(crate) enum ListFormat {
    /// The regular, default format.
    #[default]
    Pretty,
    /// The terse format, where nothing other than the name of the test and
    /// ": benchmark" at the end is printed out.
    Terse,
}

/// Benchmark filtering support.
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run all benchmarks.
    AcceptAll,
    /// Run the benchmarks matching this regex.
    Regex(Regex),
    /// Run the benchmark matching this string exactly.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}

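/// The benchmark manager.
///
/// `Criterion` lets you configure and execute benchmarks. Each benchmark
/// consists of four phases: warm-up, measurement, analysis, and comparison
/// against the previously saved baseline, if any.
///
/// A minimal configuration sketch:
///
/// ```no_run
/// use criterion::Criterion;
/// use std::time::Duration;
///
/// let mut criterion = Criterion::default()
///     .sample_size(50)
///     .measurement_time(Duration::from_secs(10))
///     .configure_from_args();
/// criterion.bench_function("example", |b| b.iter(|| 1 + 1));
/// ```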
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    filter: BenchmarkFilter,
    report: Reports,
    output_directory: PathBuf,
    baseline_directory: String,
    baseline: Baseline,
    load_baseline: Option<String>,
    all_directories: HashSet<String>,
    all_titles: HashSet<String>,
    measurement: M,
    profiler: Box<RefCell<dyn Profiler>>,
    connection: Option<MutexGuard<'static, Connection>>,
    mode: Mode,
}

/// Returns the Cargo target directory, possibly calling `cargo metadata` to
/// figure it out.
fn cargo_target_directory() -> Option<PathBuf> {
    #[derive(Deserialize)]
    struct Metadata {
        target_directory: PathBuf,
    }

    env::var_os("CARGO_TARGET_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            let output = Command::new(env::var_os("CARGO")?)
                .args(["metadata", "--format-version", "1"])
                .output()
                .ok()?;
            let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
            Some(metadata.target_directory)
        })
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    /// - Plotting: enabled, using gnuplot if available or plotters if it isn't
    /// - No filter
    fn default() -> Criterion {
        let reports = Reports {
            cli_enabled: true,
            cli: CliReport::new(false, false, CliVerbosity::Normal),
            bencher_enabled: false,
            bencher: BencherReport,
            html: default_plotting_backend().create_plotter().map(Html::new),
            csv_enabled: cfg!(feature = "csv_output"),
        };

        let mut criterion = Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
                quick_mode: false,
            },
            filter: BenchmarkFilter::AcceptAll,
            report: reports,
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            load_baseline: None,
            output_directory: default_output_directory().clone(),
            all_directories: HashSet::new(),
            all_titles: HashSet::new(),
            measurement: WallTime,
            profiler: Box::new(RefCell::new(ExternalProfiler)),
            connection: cargo_criterion_connection()
                .as_ref()
                .map(|mtx| mtx.lock().unwrap()),
            mode: Mode::Benchmark,
        };

        if criterion.connection.is_some() {
            // Disable all local reports when connected to cargo-criterion; it does the reporting.
            criterion.report.cli_enabled = false;
            criterion.report.bencher_enabled = false;
            criterion.report.csv_enabled = false;
            criterion.report.html = None;
        }
        criterion
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`Measurement`] trait for more details.
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Can't use struct update syntax here because it doesn't support changing the generic type.
        Criterion {
            config: self.config,
            filter: self.filter,
            report: self.report,
            baseline_directory: self.baseline_directory,
            baseline: self.baseline,
            load_baseline: self.load_baseline,
            output_directory: self.output_directory,
            all_directories: self.all_directories,
            all_titles: self.all_titles,
            measurement: m,
            profiler: self.profiler,
            connection: self.connection,
            mode: self.mode,
        }
    }

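    /// Changes the internal profiler for benchmarks run with this runner. See
    /// the [`Profiler`] trait for more details.
    ///
    /// A minimal sketch with a hypothetical no-op profiler:
    ///
    /// ```no_run
    /// use criterion::{profiler::Profiler, Criterion};
    /// use std::path::Path;
    ///
    /// struct NoopProfiler;
    /// impl Profiler for NoopProfiler {
    ///     fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {}
    ///     fn stop_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {}
    /// }
    ///
    /// let criterion = Criterion::default().with_profiler(NoopProfiler);
    /// ```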
    #[must_use]
    pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
        Criterion {
            profiler: Box::new(RefCell::new(p)),
            ..self
        }
    }

    /// Set the [plotting backend](PlottingBackend). By default, Criterion.rs
    /// will use the gnuplot backend if gnuplot is available, or the plotters
    /// backend if it isn't.
    ///
    /// # Panics
    ///
    /// Panics if `backend` is `PlottingBackend::Gnuplot` and gnuplot is not
    /// available.
    #[must_use]
    pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
        if let PlottingBackend::Gnuplot = backend {
            assert!(
                gnuplot_version().is_ok(),
                "Gnuplot plotting backend was requested, but gnuplot is not available. \
                 To continue, either install Gnuplot or allow Criterion.rs to fall back \
                 to using plotters."
            );
        }

        self.report.html = backend.create_plotter().map(Html::new);
        self
    }

    /// Changes the default sample size for benchmarks run with this runner. A
    /// bigger sample should yield more accurate results if paired with a
    /// sufficiently large measurement time.
    ///
    /// # Panics
    ///
    /// Panics if `n < 10`.
    #[must_use]
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero.
    #[must_use]
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this runner.
    /// A longer time makes the measurement more resilient to transitory peak
    /// loads caused by external programs, at the cost of longer runs.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero.
    #[must_use]
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of resamples for benchmarks run with this
    /// runner. Resamples are used for the bootstrap; a larger number reduces the
    /// random sampling error inherent to the bootstrap but increases the
    /// analysis time.
    ///
    /// # Panics
    ///
    /// Panics if the number of resamples is set to zero.
    #[must_use]
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner.
    /// Performance changes smaller than the threshold are ignored even if they
    /// are statistically significant. The default is 0.01 (1%).
    ///
    /// # Panics
    ///
    /// Panics if the threshold is set to a negative value.
    #[must_use]
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this runner.
    /// The confidence level is the desired probability that the true runtime
    /// lies within the estimated confidence interval. The default is 0.95.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is set to a value outside the `(0, 1)` range.
    #[must_use]
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
        }

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default significance level for benchmarks run with this
    /// runner. This is used in the hypothesis test that decides whether the
    /// measurements differ from the previous run's. The default is 0.05, meaning
    /// roughly 5% of identical benchmarks will register as different due to noise.
    ///
    /// # Panics
    ///
    /// Panics if the significance level is set to a value outside the `(0, 1)` range.
    #[must_use]
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    /// Enables plotting.
    #[must_use]
    pub fn with_plots(mut self) -> Criterion<M> {
        // If running under cargo-criterion, don't re-enable the reports; it does the reporting.
        if self.connection.is_none() && self.report.html.is_none() {
            let default_backend = default_plotting_backend().create_plotter();
            if let Some(backend) = default_backend {
                self.report.html = Some(Html::new(backend));
            } else {
                panic!("Cannot find a default plotting backend!");
            }
        }
        self
    }

    /// Disables plotting.
    #[must_use]
    pub fn without_plots(mut self) -> Criterion<M> {
        self.report.html = None;
        self
    }

    /// Names an explicit baseline and enables overwriting the previous results.
    #[must_use]
    pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    /// Names an explicit baseline and disables overwriting the previous results.
    #[must_use]
    pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = if strict {
            Baseline::CompareStrict
        } else {
            Baseline::CompareLenient
        };
        self
    }

    /// Filters the benchmarks. Only benchmarks with names that contain the given
    /// string will be executed.
    ///
    /// This overwrites [`Self::with_benchmark_filter`].
    ///
    /// # Panics
    ///
    /// Panics if the filter is not a valid regular expression.
    #[must_use]
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
        let filter_text = filter.into();
        let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
            panic!(
                "Unable to parse '{}' as a regular expression: {}",
                filter_text, err
            )
        });
        self.filter = BenchmarkFilter::Regex(filter);

        self
    }

    /// Only run benchmarks specified by the given filter.
    ///
    /// This overwrites [`Self::with_filter`].
    #[must_use]
    pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
        self.filter = filter;

        self
    }

    /// Override whether the CLI output will be colored or not. Usually you would
    /// use the `--color` CLI argument, but this is available for programmatic
    /// use as well.
    #[must_use]
    pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
        self.report.cli.enable_text_coloring = enabled;
        self
    }

    /// Set the output directory (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
        path.clone_into(&mut self.output_directory);

        self
    }

    /// Set the profile time (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
        match profile_time {
            Some(time) => self.mode = Mode::Profile(time),
            None => self.mode = Mode::Benchmark,
        }

        self
    }

    /// Generate the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if !self.mode.is_benchmark() {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plot_config: PlotConfiguration::default(),
        };

        self.report.final_summary(&report_context);
    }

    /// Configure this criterion struct based on the command-line arguments to
    /// this process.
    #[must_use]
    #[allow(clippy::cognitive_complexity)]
    pub fn configure_from_args(mut self) -> Criterion<M> {
        use clap::{value_parser, Arg, Command};
        let matches = Command::new("Criterion Benchmark")
            .arg(Arg::new("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::new("color")
                .short('c')
                .long("color")
                .alias("colour")
                .value_parser(["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::new("verbose")
                .short('v')
                .long("verbose")
                .num_args(0)
                .help("Print additional statistical information."))
            .arg(Arg::new("quiet")
                .long("quiet")
                .num_args(0)
                .conflicts_with("verbose")
                .help("Print only the benchmark results."))
            .arg(Arg::new("noplot")
                .short('n')
                .long("noplot")
                .num_args(0)
                .help("Disable plot and HTML generation."))
            .arg(Arg::new("save-baseline")
                .short('s')
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::new("discard-baseline")
                .long("discard-baseline")
                .num_args(0)
                .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
                .help("Discard benchmark results."))
            .arg(Arg::new("baseline")
                .short('b')
                .long("baseline")
                .conflicts_with_all(["save-baseline", "baseline-lenient"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
            .arg(Arg::new("baseline-lenient")
                .long("baseline-lenient")
                .conflicts_with_all(["save-baseline", "baseline"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
            .arg(Arg::new("list")
                .long("list")
                .num_args(0)
                .help("List all benchmarks")
                .conflicts_with_all(["test", "profile-time"]))
            .arg(Arg::new("format")
                .long("format")
                .value_parser(["pretty", "terse"])
                .default_value("pretty")
                .help("Output formatting"))
            .arg(Arg::new("ignored")
                .long("ignored")
                .num_args(0)
                .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
            .arg(Arg::new("exact")
                .long("exact")
                .num_args(0)
                .help("Run benchmarks that exactly match the provided filter"))
            .arg(Arg::new("profile-time")
                .long("profile-time")
                .value_parser(value_parser!(f64))
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
                .conflicts_with_all(["test", "list"]))
            .arg(Arg::new("load-baseline")
                .long("load-baseline")
                .conflicts_with("profile-time")
                .requires("baseline")
                .help("Load a previous baseline instead of sampling new data."))
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
            .arg(Arg::new("warm-up-time")
                .long("warm-up-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
            .arg(Arg::new("measurement-time")
                .long("measurement-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
            .arg(Arg::new("nresamples")
                .long("nresamples")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
            .arg(Arg::new("noise-threshold")
                .long("noise-threshold")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
            .arg(Arg::new("confidence-level")
                .long("confidence-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
            .arg(Arg::new("significance-level")
                .long("significance-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
            .arg(Arg::new("quick")
                .long("quick")
                .num_args(0)
                .conflicts_with("sample-size")
                .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
            .arg(Arg::new("test")
                .hide(true)
                .long("test")
                .num_args(0)
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
                .conflicts_with_all(["list", "profile-time"]))
            .arg(Arg::new("bench")
                .hide(true)
                .long("bench")
                .num_args(0))
            .arg(Arg::new("plotting-backend")
                .long("plotting-backend")
                .value_parser(["gnuplot", "plotters"])
                .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
            .arg(Arg::new("output-format")
                .long("output-format")
                .value_parser(["criterion", "bencher"])
                .default_value("criterion")
                .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
            .arg(Arg::new("nocapture")
                .long("nocapture")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("show-output")
                .long("show-output")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("include-ignored")
                .long("include-ignored")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("version")
                .hide(true)
                .short('V')
                .long("version")
                .num_args(0))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/criterion-rs/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://criterion-rs.github.io/book/faq.html
")
            .get_matches();

        if self.connection.is_some() {
            if let Some(color) = matches.get_one::<String>("color") {
                if color != "auto" {
                    eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
                }
            }
            if matches.get_flag("verbose") {
                eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
            }
            if matches.get_flag("noplot") {
                eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
            }
            if let Some(backend) = matches.get_one::<String>("plotting-backend") {
                eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
            }
            if let Some(format) = matches.get_one::<String>("output-format") {
                if format != "criterion" {
                    eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
                }
            }

            if matches.contains_id("baseline")
                || matches
                    .get_one::<String>("save-baseline")
                    .is_some_and(|base| base != "base")
                || matches.contains_id("load-baseline")
            {
                eprintln!("Error: baselines are not supported when running with cargo-criterion.");
                std::process::exit(1);
            }
        }

        let bench = matches.get_flag("bench");
        let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run tests
            (true, false) => false, // cargo bench should run benchmarks
            (false, _) => true,     // cargo test --benches should run tests
        };

        self.mode = if matches.get_flag("list") {
            let list_format = match matches
                .get_one::<String>("format")
                .expect("a default value was provided for this")
                .as_str()
            {
                "pretty" => ListFormat::Pretty,
                "terse" => ListFormat::Terse,
                other => unreachable!(
                    "unrecognized value for --format that isn't part of possible-values: {}",
                    other
                ),
            };
            Mode::List(list_format)
        } else if test_mode {
            Mode::Test
        } else if let Some(&num_seconds) = matches.get_one("profile-time") {
            if num_seconds < 1.0 {
                eprintln!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            Mode::Profile(Duration::from_secs_f64(num_seconds))
        } else {
            Mode::Benchmark
        };

        // Disable the connection to cargo-criterion when not actually benchmarking.
        if !self.mode.is_benchmark() {
            self.connection = None;
        }

        let filter = if matches.get_flag("ignored") {
            // --ignored overwrites any name-based filters passed in.
            BenchmarkFilter::RejectAll
        } else if let Some(filter) = matches.get_one::<String>("FILTER") {
            if matches.get_flag("exact") {
                BenchmarkFilter::Exact(filter.to_owned())
            } else {
                let regex = Regex::new(filter).unwrap_or_else(|err| {
                    panic!(
                        "Unable to parse '{}' as a regular expression: {}",
                        filter, err
                    )
                });
                BenchmarkFilter::Regex(regex)
            }
        } else {
            BenchmarkFilter::AcceptAll
        };
        self = self.with_benchmark_filter(filter);

        match matches.get_one("plotting-backend").map(String::as_str) {
            // Use plotting_backend() here to re-use the panic behavior if gnuplot is not available.
            Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
            Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
            Some(val) => panic!("Unexpected plotting backend '{}'", val),
            None => {}
        }

        if matches.get_flag("noplot") {
            self = self.without_plots();
        }

        if let Some(dir) = matches.get_one::<String>("save-baseline") {
            self.baseline = Baseline::Save;
            dir.clone_into(&mut self.baseline_directory);
        }
        if matches.get_flag("discard-baseline") {
            self.baseline = Baseline::Discard;
        }
        if let Some(dir) = matches.get_one::<String>("baseline") {
            self.baseline = Baseline::CompareStrict;
            dir.clone_into(&mut self.baseline_directory);
        }
        if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
            self.baseline = Baseline::CompareLenient;
            dir.clone_into(&mut self.baseline_directory);
        }

        if self.connection.is_some() {
            // Disable all local reports when connected to cargo-criterion; it does the reporting.
            self.report.cli_enabled = false;
            self.report.bencher_enabled = false;
            self.report.csv_enabled = false;
            self.report.html = None;
        } else {
            match matches.get_one("output-format").map(String::as_str) {
                Some("bencher") => {
                    self.report.bencher_enabled = true;
                    self.report.cli_enabled = false;
                }
                _ => {
                    let verbose = matches.get_flag("verbose");
                    let verbosity = if verbose {
                        CliVerbosity::Verbose
                    } else if matches.get_flag("quiet") {
                        CliVerbosity::Quiet
                    } else {
                        CliVerbosity::Normal
                    };
                    let stdout_isatty = stdout().is_terminal();
                    let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
                    let enable_text_coloring;
                    match matches.get_one("color").map(String::as_str) {
                        Some("always") => {
                            enable_text_coloring = true;
                        }
                        Some("never") => {
                            enable_text_coloring = false;
                            enable_text_overwrite = false;
                        }
                        _ => enable_text_coloring = stdout_isatty,
                    };
                    self.report.bencher_enabled = false;
                    self.report.cli_enabled = true;
                    self.report.cli =
                        CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
                }
            };
        }

        if let Some(dir) = matches.get_one::<String>("load-baseline") {
            self.load_baseline = Some(dir.to_owned());
        }

        if let Some(&num_size) = matches.get_one("sample-size") {
            assert!(num_size >= 10);
            self.config.sample_size = num_size;
        }
        if let Some(&num_seconds) = matches.get_one("warm-up-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.warm_up_time = dur;
        }
        if let Some(&num_seconds) = matches.get_one("measurement-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.measurement_time = dur;
        }
        if let Some(&num_resamples) = matches.get_one("nresamples") {
            assert!(num_resamples > 0);

            self.config.nresamples = num_resamples;
        }
        if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
            assert!(num_noise_threshold > 0.0);

            self.config.noise_threshold = num_noise_threshold;
        }
        if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
            assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);

            self.config.confidence_level = num_confidence_level;
        }
        if let Some(&num_significance_level) = matches.get_one("significance-level") {
            assert!(num_significance_level > 0.0 && num_significance_level < 1.0);

            self.config.significance_level = num_significance_level;
        }

        if matches.get_flag("quick") {
            self.config.quick_mode = true;
        }

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match &self.filter {
            BenchmarkFilter::AcceptAll => true,
            BenchmarkFilter::Regex(regex) => regex.is_match(id),
            BenchmarkFilter::Exact(exact) => id == exact,
            BenchmarkFilter::RejectAll => false,
        }
    }

    /// Returns true iff we should save the benchmark results in
    /// JSON files on the local disk.
    fn should_save_baseline(&self) -> bool {
        self.connection.is_none()
            && self.load_baseline.is_none()
            && !matches!(self.baseline, Baseline::Discard)
    }

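    /// Return a benchmark group. All benchmarks performed using a benchmark
    /// group will be grouped together in the final report.
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::Criterion;
    ///
    /// let mut criterion = Criterion::default();
    /// let mut group = criterion.benchmark_group("fibonacci");
    /// group.bench_function("recursive", |b| b.iter(|| 1 + 1));
    /// group.bench_function("iterative", |b| b.iter(|| 2 + 2));
    /// group.finish();
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the group name is empty.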
    pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
        let group_name = group_name.into();
        assert!(!group_name.is_empty(), "Group name must not be empty.");

        if let Some(conn) = &self.connection {
            conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
                .unwrap();
        }

        BenchmarkGroup::new(self, group_name)
    }
}
impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
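    /// Benchmarks a function. For comparing multiple functions, see
    /// [`benchmark_group`](Self::benchmark_group).
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::Criterion;
    /// use std::hint::black_box;
    ///
    /// fn fibonacci(n: u64) -> u64 {
    ///     match n {
    ///         0 | 1 => 1,
    ///         n => fibonacci(n - 1) + fibonacci(n - 2),
    ///     }
    /// }
    ///
    /// let mut criterion = Criterion::default();
    /// criterion.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20))));
    /// ```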
    pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>),
    {
        self.benchmark_group(id)
            .bench_function(BenchmarkId::no_function(), f);
        self
    }

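    /// Benchmarks a function with an input. For comparing multiple functions or
    /// multiple inputs, see [`benchmark_group`](Self::benchmark_group).
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use criterion::{BenchmarkId, Criterion};
    ///
    /// let mut criterion = Criterion::default();
    /// let input = 5u64;
    /// criterion.bench_with_input(BenchmarkId::new("with_input", input), &input, |b, &i| {
    ///     b.iter(|| i + 10)
    /// });
    /// ```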
    pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>, &I),
    {
        // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with no
        // function name. That's intended for use with BenchmarkGroups, where the function name
        // isn't necessary, but here it is.
        let group_name = id.function_name.expect(
            "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
             Consider using a BenchmarkGroup or BenchmarkId::new instead.",
        );
        // Guaranteed safe because external callers can't create benchmark IDs without a parameter.
        let parameter = id.parameter.unwrap();
        self.benchmark_group(group_name).bench_with_input(
            BenchmarkId::no_function_with_input(parameter),
            input,
            f,
        );
        self
    }
}

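/// Enum representing different ways of measuring the throughput of benchmarked
/// code. If a throughput is configured for a benchmark, the estimated
/// throughput is reported alongside the time per iteration.
///
/// A minimal sketch of configuring throughput on a group:
///
/// ```no_run
/// use criterion::{Criterion, Throughput};
///
/// let bytes = vec![0u8; 1024];
/// let mut criterion = Criterion::default();
/// let mut group = criterion.benchmark_group("checksum");
/// group.throughput(Throughput::Bytes(bytes.len() as u64));
/// group.bench_function("sum", |b| b.iter(|| bytes.iter().map(|&x| x as u64).sum::<u64>()));
/// group.finish();
/// ```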
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
    /// Measure throughput in terms of bits/second. The value should be the
    /// number of bits processed by one iteration of the benchmarked code.
    Bits(u64),

    /// Measure throughput in terms of bytes/second. The value should be the
    /// number of bytes processed by one iteration of the benchmarked code.
    /// Typically, this would be the length of an input in bytes.
    Bytes(u64),

    /// Equivalent to `Bytes`, but the value will be reported in multiples of
    /// 1000 (KB, MB, GB, etc.) instead of 1024 (KiB, MiB, GiB, etc.).
    BytesDecimal(u64),

    /// Measure throughput in terms of elements/second. The value should be the
    /// number of elements processed by one iteration of the benchmarked code.
    /// Typically, this would be the size of a collection.
    Elements(u64),

    /// Measure throughput in terms of both elements/second and bytes/second.
    ElementsAndBytes {
        /// The number of elements processed by one iteration.
        elements: u64,
        /// The number of bytes processed by one iteration.
        bytes: u64,
    },
}

/// Axis scaling type: linear or logarithmic.
#[derive(Debug, Default, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly.
    #[default]
    Linear,

    /// Axes scale logarithmically.
    Logarithmic,
}

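/// Contains the configuration options for the plots generated by a particular
/// benchmark or benchmark group.
///
/// A minimal sketch of applying a logarithmic scale to a group's summary plots:
///
/// ```no_run
/// use criterion::{AxisScale, Criterion, PlotConfiguration};
///
/// let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
///
/// let mut criterion = Criterion::default();
/// let mut group = criterion.benchmark_group("log_scale");
/// group.plot_config(plot_config);
/// // ...add benchmarks to the group, then:
/// group.finish();
/// ```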
#[derive(Debug, Default, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) for the summary plots.
    ///
    /// Typically, you would set this to logarithmic if benchmarking over a
    /// range of inputs which scale exponentially. Defaults to linear.
    #[must_use]
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}

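/// This enum allows the user to control how Criterion.rs chooses the iteration
/// counts when sampling. The default is `Auto`, which chooses a method
/// automatically based on the iteration time measured during warm-up.
///
/// A minimal sketch of forcing flat sampling for a long-running benchmark:
///
/// ```no_run
/// use criterion::{Criterion, SamplingMode};
///
/// let mut criterion = Criterion::default();
/// let mut group = criterion.benchmark_group("slow");
/// group.sampling_mode(SamplingMode::Flat);
/// group.bench_function("expensive", |b| {
///     b.iter(|| std::thread::sleep(std::time::Duration::from_millis(10)))
/// });
/// group.finish();
/// ```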
#[derive(Debug, Default, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling mode automatically. This is the
    /// default, and is recommended for most users and most benchmarks.
    #[default]
    Auto,

    /// Scale the iteration count in each sample linearly. This is suitable for
    /// most benchmarks, but it tends to require many iterations, which can make
    /// it very slow for long-running benchmarks.
    Linear,

    /// Keep the iteration count the same for all samples. This is not
    /// recommended, as it affects the statistics that Criterion.rs can compute,
    /// but it requires fewer iterations and is therefore more suitable for
    /// very long-running benchmarks.
    Flat,
}

impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate the total run time with linear sampling: sample i runs i * d
                // iterations, so n samples run n(n+1)/2 * d iterations in total.
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                // Switch to flat sampling if linear sampling would overshoot the target
                // time by more than a factor of two.
                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}

impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos();
                // Solve [d + 2*d + 3*d + ... + n*d] * met = m_ns for d.
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        eprintln!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos() as f64;
                let time_per_sample = m_ns / (n as f64);
                // This is pretty simplistic; we could do something smarter to fit into the allotted time.
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        eprintln!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // With d = 1, the total measurement time is n(n+1)/2 * met. Setting that equal to
        // target_time and letting c = target_time / met gives n^2 + n - 2c = 0, whose
        // positive root (via the quadratic formula) is:
        let c = target_time / met;
        let sample_size = (-1.0 + (1.0 + 8.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, since Criterion.rs doesn't allow smaller sample sizes.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        // With one iteration per sample, n samples take n * met, so n = target_time / met.
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to at least 10, since Criterion.rs doesn't allow smaller sample sizes.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Custom-test-framework runner. Should not be used directly.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}