Diffstat (limited to 'src')
-rw-r--r--  src/crates/benchmarker/Cargo.toml             8
-rw-r--r--  src/crates/benchmarker/benches/vec.rs        19
-rw-r--r--  src/crates/benchmarker/examples/run_vec.rs    6
-rw-r--r--  src/crates/benchmarker/src/bench.rs          80
-rw-r--r--  src/crates/benchmarker/src/container.rs     316
-rw-r--r--  src/crates/benchmarker/src/indexable.rs     178
-rw-r--r--  src/crates/benchmarker/src/lib.rs            80
-rw-r--r--  src/crates/benchmarker/src/results.rs        47
-rw-r--r--  src/crates/benchmarker/src/stack.rs         122
9 files changed, 369 insertions, 487 deletions
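
The heart of this change is a switch from the crate's hand-rolled `benchmark_op` timing loop to criterion. As an orientation aid only (this sketch is not part of the commit, and it benchmarks a plain `Vec<usize>` rather than going through the `Container` trait), the pattern adopted in `container.rs` below looks like this: one benchmark group per operation, one parameterised benchmark per `n`, and `iter_batched_ref` so that the setup closure runs outside the timed region.

use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};

// Times `Vec::contains` at several container sizes. The setup closure runs
// outside the measured region; only the `contains` call itself is timed.
fn example_contains(c: &mut Criterion) {
    let mut g = c.benchmark_group("contains");
    for n in [8usize, 256, 1024, 65536] {
        g.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            b.iter_batched_ref(
                || (0..n).collect::<Vec<usize>>(), // untimed setup
                |v| v.contains(&(n / 2)),          // timed operation
                BatchSize::LargeInput,
            );
        });
    }
    g.finish();
}

criterion_group!(benches, example_contains);
criterion_main!(benches);

With `harness = false` set for the new `[[bench]]` target in `Cargo.toml`, `cargo bench` runs criterion's entry point (from `criterion_main!`) instead of the default libtest harness.
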
diff --git a/src/crates/benchmarker/Cargo.toml b/src/crates/benchmarker/Cargo.toml
index 3f542fc..a42127b 100644
--- a/src/crates/benchmarker/Cargo.toml
+++ b/src/crates/benchmarker/Cargo.toml
@@ -4,6 +4,8 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+criterion = "0.3"
+
 log = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
@@ -13,4 +15,8 @@ rand = { workspace = true }
 primrose-library = { path = "../library" }
 
 [dev-dependencies]
-env_logger = { workspace = true }
\ No newline at end of file
+env_logger = { workspace = true }
+
+[[bench]]
+name = "vec"
+harness = false
\ No newline at end of file
diff --git a/src/crates/benchmarker/benches/vec.rs b/src/crates/benchmarker/benches/vec.rs
new file mode 100644
index 0000000..8981dcd
--- /dev/null
+++ b/src/crates/benchmarker/benches/vec.rs
@@ -0,0 +1,19 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+
+fn run_benches(c: &mut Criterion) {
+    candelabra_benchmarker::benchmark_container::<Vec<usize>, _>(c, &[8, 256, 1024, 65536]);
+}
+
+criterion_group!(benches, run_benches);
+
+// criterion_group!(
+//     benches,
+//     candelabra_benchmarker::benchmark_indexable::<Vec<usize>, _>
+// );
+
+// criterion_group!(
+//     benches,
+//     candelabra_benchmarker::benchmark_stack::<Vec<usize>, _>
+// );
+
+criterion_main!(benches);
diff --git a/src/crates/benchmarker/examples/run_vec.rs b/src/crates/benchmarker/examples/run_vec.rs
index 0c93bbb..c821bdd 100644
--- a/src/crates/benchmarker/examples/run_vec.rs
+++ b/src/crates/benchmarker/examples/run_vec.rs
@@ -5,10 +5,10 @@ fn main() {
     to_writer(
         std::io::stdout(),
-        &Benchmarker::<Vec<usize>, usize>::with_ns(&[1, 100, 1000])
+        &Benchmarker::<Vec<usize>, usize>::with_ns(&[8, 256, 1024, 65536])
             .container()
-            .indexable()
-            .stack()
+            // .indexable()
+            // .stack()
             .finish(),
     )
     .unwrap();
diff --git a/src/crates/benchmarker/src/bench.rs b/src/crates/benchmarker/src/bench.rs
deleted file mode 100644
index 5246ba7..0000000
--- a/src/crates/benchmarker/src/bench.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-use std::{
-    cmp,
-    hint::black_box,
-    time::{Duration, Instant},
-};
-
-use crate::BenchmarkResult;
-
-/// Benchmark an operation for approx 5 seconds, returning the results.
-///
-/// `setup` is used to create the thing `op` acts on, and `undo` is called between each run to undo `op`.
-/// If `undo` is invalid, this will return garbage results.
-///
-/// Warm-up for the setup is done beforehand.
-pub fn benchmark_op<T, R>(
-    mut setup: impl FnMut() -> T,
-    mut op: impl FnMut(&mut T) -> R,
-) -> BenchmarkResult {
-    // let loop_end = Instant::now() + Duration::from_secs(5);
-    let loop_end = Instant::now() + Duration::from_millis(100);
-
-    let mut times = 0;
-    let mut min = Duration::from_secs(u64::MAX);
-    let mut max = Duration::from_secs(0);
-    let mut sum = Duration::from_secs(0);
-
-    while Instant::now() + max < loop_end {
-        let mut target = setup();
-        let duration = time_singular(|| op(&mut target));
-
-        min = cmp::min(min, duration);
-        max = cmp::max(max, duration);
-        sum += duration;
-        times += 1;
-    }
-
-    BenchmarkResult {
-        times,
-        min,
-        max,
-        avg: sum / times as u32,
-    }
-}
-
-fn time_singular<R>(f: impl FnOnce() -> R) -> Duration {
-    let start = Instant::now();
-    black_box(f());
-    let end = Instant::now();
-    end - start
-}
-
-#[cfg(test)]
-mod tests {
-    use super::benchmark_op;
-    use std::time::Duration;
-
-    #[test]
-    fn benchmark_op_resets_properly() {
-        benchmark_op(
-            || false,
-            |b| {
-                assert!(!(*b));
-                *b = true;
-            },
-        );
-    }
-
-    #[test]
-    fn benchmark_op_times_properly() {
-        let results = benchmark_op(|| (), |_| std::thread::sleep(Duration::from_millis(5)));
-
-        let avg_millis = results.avg.as_nanos() as f32 / (10.0_f32).powi(6);
-        dbg!(avg_millis);
-
-        assert!(
-            (avg_millis - 5.0).abs() < 0.1,
-            "sleeping for 5ms takes roughly 5ms"
-        )
-    }
-}
diff --git a/src/crates/benchmarker/src/container.rs b/src/crates/benchmarker/src/container.rs
index 9d5fac5..9dc3d94 100644
--- a/src/crates/benchmarker/src/container.rs
+++ b/src/crates/benchmarker/src/container.rs
@@ -1,74 +1,17 @@
-use std::{any::type_name, collections::HashMap};
-
-use log::debug;
+use criterion::{BatchSize, BenchmarkId, Criterion};
 use primrose_library::traits::Container;
 use rand::{distributions::Standard, prelude::Distribution, random, thread_rng, Rng};
 
-use crate::{bench::benchmark_op, Observation, Results};
-
-/// Benchmark [`primrose_library::traits::Container`] operations
-pub trait ContainerExt<E> {
-    /// Benchmark at a single `n`.
-    fn benchmark_container_at(n: usize) -> Results;
-
-    /// Benchmark `len` at a single `n`.
-    fn benchmark_container_len(n: usize) -> Observation;
-
-    /// Benchmark `contains` at a single `n`.
-    fn benchmark_container_contains(n: usize) -> Observation;
-
-    /// Benchmark `insert` at a single `n`.
-    fn benchmark_container_insert(n: usize) -> Observation;
-
-    /// Benchmark `clear` at a single `n`.
-    fn benchmark_container_clear(n: usize) -> Observation;
-
-    /// Benchmark `remove` at a single `n`.
-    fn benchmark_container_remove(n: usize) -> Observation;
-}
-
-impl<T, E> ContainerExt<E> for T
+pub fn benchmark_container<T, E>(c: &mut Criterion, ns: &[usize])
 where
     T: Container<E> + Default,
     E: Clone,
     Standard: Distribution<E>,
 {
-    fn benchmark_container_at(n: usize) -> Results {
-        let mut by_op = HashMap::new();
-
-        debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
-
-        debug!("...len");
-        by_op.insert("len".to_string(), vec![Self::benchmark_container_len(n)]);
-        debug!("...contains");
-        by_op.insert(
-            "contains".to_string(),
-            vec![Self::benchmark_container_contains(n)],
-        );
-        debug!("...insert");
-        by_op.insert(
-            "insert".to_string(),
-            vec![Self::benchmark_container_insert(n)],
-        );
-        debug!("...clear");
-        by_op.insert(
-            "clear".to_string(),
-            vec![Self::benchmark_container_clear(n)],
-        );
-        debug!("...remove");
-        by_op.insert(
-            "remove".to_string(),
-            vec![Self::benchmark_container_remove(n)],
-        );
-        debug!("--- done!");
-
-        Results { by_op }
-    }
-
-    fn benchmark_container_contains(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
+    let mut g = c.benchmark_group("contains");
+    for n in ns {
+        g.bench_with_input(BenchmarkId::from_parameter(n), n, |b, &n| {
+            b.iter_batched_ref(
                 || {
                     // TODO: maybe we should actually just test the worst case? (at the end)
                     // we also don't actually test misses yet.
@@ -92,79 +35,178 @@ where
                     (c, chosen)
                 },
                 |(c, search)| c.contains(search),
-            ),
-        )
-    }
-
-    fn benchmark_container_len(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.insert(random());
-                    }
-                    c
-                },
-                |c| c.len(),
-            ),
-        )
-    }
-
-    fn benchmark_container_insert(n: usize) -> Observation {
-        let setup_closure = || {
-            let mut c = T::default();
-            for _ in 0..n {
-                c.insert(random());
-            }
-            c
-        };
-        (
-            n,
-            benchmark_op(setup_closure, |c| {
-                // TODO: rng generation could throw off benchmarks
-                c.insert(random())
-            }),
-        )
-    }
-
-    fn benchmark_container_clear(n: usize) -> Observation {
-        let setup_closure = || {
-            let mut c = T::default();
-            for _ in 0..n {
-                c.insert(random());
-            }
-            c
-        };
-        (n, benchmark_op(setup_closure, |c| c.clear()))
-    }
-
-    fn benchmark_container_remove(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut rng = thread_rng();
-                    let mut c = T::default();
-
-                    // decide where the element that we will remove will be
-                    let pivot = rng.gen_range(0..n);
-
-                    // insert the element at pivot, and keep track of what it is
-                    for _ in 0..pivot {
-                        c.insert(random());
-                    }
-                    let chosen = rng.gen();
-                    c.insert(chosen.clone());
-                    for _ in pivot..n {
-                        c.insert(random());
-                    }
-
-                    (c, chosen)
-                },
-                |(c, chosen)| c.remove(chosen.clone()),
-            ),
-        )
+                BatchSize::LargeInput,
+            );
+        });
     }
 }
+
+// /// Benchmark [`primrose_library::traits::Container`] operations
+// pub trait ContainerExt<E> {
+//     /// Benchmark at a single `n`.
+//     fn benchmark_container_at(n: usize) -> Results;
+
+//     /// Benchmark `len` at a single `n`.
+//     fn benchmark_container_len(n: usize) -> Observation;
+
+//     /// Benchmark `contains` at a single `n`.
+//     fn benchmark_container_contains(n: usize) -> Observation;
+
+//     /// Benchmark `insert` at a single `n`.
+//     fn benchmark_container_insert(n: usize) -> Observation;
+
+//     /// Benchmark `clear` at a single `n`.
+//     fn benchmark_container_clear(n: usize) -> Observation;
+
+//     /// Benchmark `remove` at a single `n`.
+//     fn benchmark_container_remove(n: usize) -> Observation;
+// }
+
+// impl<T, E> ContainerExt<E> for T
+// where
+//     T: Container<E> + Default,
+//     E: Clone,
+//     Standard: Distribution<E>,
+// {
+//     fn benchmark_container_at(n: usize) -> Results {
+//         let mut by_op = HashMap::new();
+
+//         debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
+
+//         // debug!("...len");
+//         // by_op.insert("len".to_string(), vec![Self::benchmark_container_len(n)]);
+//         debug!("...contains");
+//         by_op.insert(
+//             "contains".to_string(),
+//             vec![Self::benchmark_container_contains(n)],
+//         );
+//         // debug!("...insert");
+//         // by_op.insert(
+//         //     "insert".to_string(),
+//         //     vec![Self::benchmark_container_insert(n)],
+//         // );
+//         // debug!("...clear");
+//         // by_op.insert(
+//         //     "clear".to_string(),
+//         //     vec![Self::benchmark_container_clear(n)],
+//         // );
+//         // debug!("...remove");
+//         // by_op.insert(
+//         //     "remove".to_string(),
+//         //     vec![Self::benchmark_container_remove(n)],
+//         // );
+//         debug!("--- done!");
+
+//         Results { by_op }
+//     }
+
+//     fn benchmark_container_contains(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     // TODO: maybe we should actually just test the worst case? (at the end)
+//                     // we also don't actually test misses yet.
+
+//                     let mut rng = thread_rng();
+//                     let mut c = T::default();
+
+//                     // decide where the element that we will search for will be
+//                     let pivot = rng.gen_range(0..n);
+
+//                     // insert the element at pivot, and keep track of what it is
+//                     for _ in 0..pivot {
+//                         c.insert(random());
+//                     }
+//                     let chosen = rng.gen();
+//                     c.insert(chosen.clone());
+//                     for _ in pivot..n {
+//                         c.insert(random());
+//                     }
+
+//                     (c, chosen)
+//                 },
+//                 |(c, search)| bench_contains(c, search),
+//             ),
+//         )
+//     }
+
+//     fn benchmark_container_len(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.insert(random());
+//                     }
+//                     c
+//                 },
+//                 |c| c.len(),
+//             ),
+//         )
+//     }
+
+//     fn benchmark_container_insert(n: usize) -> Observation {
+//         let setup_closure = || {
+//             let mut c = T::default();
+//             for _ in 0..n {
+//                 c.insert(random());
+//             }
+//             c
+//         };
+//         (
+//             n,
+//             benchmark_op(setup_closure, |c| {
+//                 // TODO: rng generation could throw off benchmarks
+//                 c.insert(random())
+//             }),
+//         )
+//     }
+
+//     fn benchmark_container_clear(n: usize) -> Observation {
+//         let setup_closure = || {
+//             let mut c = T::default();
+//             for _ in 0..n {
+//                 c.insert(random());
+//             }
+//             c
+//         };
+//         (n, benchmark_op(setup_closure, |c| c.clear()))
+//     }
+
+//     fn benchmark_container_remove(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut rng = thread_rng();
+//                     let mut c = T::default();
+
+//                     // decide where the element that we will remove will be
+//                     let pivot = rng.gen_range(0..n);
+
+//                     // insert the element at pivot, and keep track of what it is
+//                     for _ in 0..pivot {
+//                         c.insert(random());
+//                     }
+//                     let chosen = rng.gen();
+//                     c.insert(chosen.clone());
+//                     for _ in pivot..n {
+//                         c.insert(random());
+//                     }
+
+//                     (c, chosen)
+//                 },
+//                 |(c, chosen)| c.remove(chosen.clone()),
+//             ),
+//         )
+//     }
+// }
+
+// fn bench_contains<T, E>(c: &mut T, search: &mut E) -> bool
+// where
+//     T: Container<E> + Default,
+//     E: Clone,
+// {
+// }
diff --git a/src/crates/benchmarker/src/indexable.rs b/src/crates/benchmarker/src/indexable.rs
index 86ea759..5a194f0 100644
--- a/src/crates/benchmarker/src/indexable.rs
+++ b/src/crates/benchmarker/src/indexable.rs
@@ -1,101 +1,109 @@
 use std::{any::type_name, collections::HashMap, hint::black_box};
+use criterion::Criterion;
 
 use log::debug;
-use primrose_library::traits::{Container, Indexable};
+use primrose_library::traits::{Container, Indexable, Stack};
 use rand::{distributions::Standard, prelude::Distribution, random};
 
-use crate::{benchmark_op, Observation, Results};
+pub fn benchmark_indexable<T, E>(c: &mut Criterion)
+where
+    T: Stack<E> + Default,
+    E: Clone,
+    Standard: Distribution<E>,
+{
+    todo!()
+}
 
-/// Benchmark [`primrose_library::traits::Indexable`] operations
-pub trait IndexableExt<E> {
-    /// Benchmark at a single `n`.
-    fn benchmark_indexable_at(n: usize) -> Results;
+// /// Benchmark [`primrose_library::traits::Indexable`] operations
+// pub trait IndexableExt<E> {
+//     /// Benchmark at a single `n`.
+//     fn benchmark_indexable_at(n: usize) -> Results;
 
-    /// Benchmark `first` at a single `n`.
-    fn benchmark_indexable_first(n: usize) -> Observation;
+//     /// Benchmark `first` at a single `n`.
+//     fn benchmark_indexable_first(n: usize) -> Observation;
 
-    /// Benchmark `last` at a single `n`.
-    fn benchmark_indexable_last(n: usize) -> Observation;
+//     /// Benchmark `last` at a single `n`.
+//     fn benchmark_indexable_last(n: usize) -> Observation;
 
-    /// Benchmark `nth` at a single `n`.
-    fn benchmark_indexable_nth(n: usize) -> Observation;
-}
+//     /// Benchmark `nth` at a single `n`.
+//     fn benchmark_indexable_nth(n: usize) -> Observation;
+// }
 
-impl<T, E> IndexableExt<E> for T
-where
-    T: Container<E> + Indexable<E> + Default,
-    Standard: Distribution<E>,
-{
-    fn benchmark_indexable_at(n: usize) -> Results {
-        let mut by_op = HashMap::new();
+// impl<T, E> IndexableExt<E> for T
+// where
+//     T: Container<E> + Indexable<E> + Default,
+//     Standard: Distribution<E>,
+// {
+//     fn benchmark_indexable_at(n: usize) -> Results {
+//         let mut by_op = HashMap::new();
 
-        debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
+//         debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
 
-        debug!("...first");
-        by_op.insert(
-            "first".to_string(),
-            vec![Self::benchmark_indexable_first(n)],
-        );
-        debug!("...last");
-        by_op.insert("last".to_string(), vec![Self::benchmark_indexable_last(n)]);
-        debug!("...nth");
-        by_op.insert("nth".to_string(), vec![Self::benchmark_indexable_nth(n)]);
-        debug!("--- done!");
+//         debug!("...first");
+//         by_op.insert(
+//             "first".to_string(),
+//             vec![Self::benchmark_indexable_first(n)],
+//         );
+//         debug!("...last");
+//         by_op.insert("last".to_string(), vec![Self::benchmark_indexable_last(n)]);
+//         debug!("...nth");
+//         by_op.insert("nth".to_string(), vec![Self::benchmark_indexable_nth(n)]);
+//         debug!("--- done!");
 
-        Results { by_op }
-    }
+//         Results { by_op }
+//     }
 
-    fn benchmark_indexable_first(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.insert(random());
-                    }
-                    c
-                },
-                |c| {
-                    black_box(c.first());
-                },
-            ),
-        )
-    }
+//     fn benchmark_indexable_first(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.insert(random());
+//                     }
+//                     c
+//                 },
+//                 |c| {
+//                     black_box(c.first());
+//                 },
+//             ),
+//         )
+//     }
 
-    fn benchmark_indexable_last(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.insert(random());
-                    }
-                    c
-                },
-                |c| {
-                    black_box(c.last());
-                },
-            ),
-        )
-    }
+//     fn benchmark_indexable_last(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.insert(random());
+//                     }
+//                     c
+//                 },
+//                 |c| {
+//                     black_box(c.last());
+//                 },
+//             ),
+//         )
+//     }
 
-    fn benchmark_indexable_nth(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.insert(random());
-                    }
-                    (c, random::<usize>())
-                },
-                |(c, fetch)| {
-                    c.nth(*fetch);
-                },
-            ),
-        )
-    }
-}
+//     fn benchmark_indexable_nth(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.insert(random());
+//                     }
+//                     (c, random::<usize>())
+//                 },
+//                 |(c, fetch)| {
+//                     c.nth(*fetch);
+//                 },
+//             ),
+//         )
+//     }
+// }
diff --git a/src/crates/benchmarker/src/lib.rs b/src/crates/benchmarker/src/lib.rs
index e6fc69b..eaaace6 100644
--- a/src/crates/benchmarker/src/lib.rs
+++ b/src/crates/benchmarker/src/lib.rs
@@ -1,81 +1,7 @@
-use std::{collections::HashMap, marker::PhantomData};
-
-pub use serde_json::to_writer;
-
-mod bench;
-pub use bench::benchmark_op;
-
 mod container;
-pub use container::ContainerExt;
-
 mod indexable;
-pub use indexable::IndexableExt;
-
 mod stack;
-pub use stack::StackExt;
-
-mod results;
-pub use results::*;
-
-/// Runs benchmarks at varying `n`s with a builder-style interface.
-///
-/// This mostly just makes our code generation easier.
-pub struct Benchmarker<T, E>(&'static [usize], Results, PhantomData<(T, E)>);
-impl<T, E> Benchmarker<T, E> {
-    /// Create a benchmarker that will repeat all benchmarks with each of the given n values.
-    pub fn with_ns(ns: &'static [usize]) -> Self {
-        Self(
-            ns,
-            Results {
-                by_op: HashMap::new(),
-            },
-            PhantomData,
-        )
-    }
-
-    /// Finish benchmarking and get the results
-    pub fn finish(self) -> Results {
-        self.1
-    }
-}
-
-impl<T, E> Benchmarker<T, E>
-where
-    T: ContainerExt<E>,
-{
-    /// Run benchmarks for [`primrose_library::traits::Container`] operations.
-    pub fn container(mut self) -> Self {
-        for n in self.0 {
-            self.1.merge(T::benchmark_container_at(*n));
-        }
-
-        self
-    }
-}
-impl<T, E> Benchmarker<T, E>
-where
-    T: IndexableExt<E>,
-{
-    /// Run benchmarks for [`primrose_library::traits::Indexable`] operations.
-    pub fn indexable(mut self) -> Self {
-        for n in self.0 {
-            self.1.merge(T::benchmark_indexable_at(*n));
-        }
-
-        self
-    }
-}
-
-impl<T, E> Benchmarker<T, E>
-where
-    T: StackExt<E>,
-{
-    /// Run benchmarks for [`primrose_library::traits::Stack`] operations.
-    pub fn stack(mut self) -> Self {
-        for n in self.0 {
-            self.1.merge(T::benchmark_stack_at(*n));
-        }
-        self
-    }
-}
+pub use container::*;
+pub use indexable::*;
+pub use stack::*;
diff --git a/src/crates/benchmarker/src/results.rs b/src/crates/benchmarker/src/results.rs
deleted file mode 100644
index 3c0783f..0000000
--- a/src/crates/benchmarker/src/results.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use std::{collections::HashMap, time::Duration};
-
-use serde::{Deserialize, Serialize};
-
-/// Results for a whole suite of benchmarks
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct Results {
-    /// Results for each collection operation
-    pub by_op: HashMap<OpName, Vec<Observation>>,
-}
-
-/// Name of an operation
-pub type OpName = String;
-
-/// The first key in the tuple is the `n` of the container before the benchmark was taken, and the second the results of the benchmark.
-pub type Observation = (usize, BenchmarkResult);
-
-/// Results for a single benchmark
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct BenchmarkResult {
-    /// Number of times the benchmark was run
-    pub times: usize,
-
-    /// The minimum time taken
-    pub min: Duration,
-
-    /// The maximum time taken
-    pub max: Duration,
-
-    /// The average (mean) time taken
-    pub avg: Duration,
-}
-
-impl Results {
-    /// Merge results from `b` into these results.
-    /// If `b` contains benchmarks for operations we have a result for, the observations are merged.
-    pub fn merge(&mut self, b: Self) -> &mut Self {
-        for (name, mut res_b) in b.by_op {
-            self.by_op
-                .entry(name)
-                .and_modify(|res| res.append(&mut res_b))
-                .or_insert(res_b);
-        }
-
-        self
-    }
-}
diff --git a/src/crates/benchmarker/src/stack.rs b/src/crates/benchmarker/src/stack.rs
index 7a1cc6e..7f32f0b 100644
--- a/src/crates/benchmarker/src/stack.rs
+++ b/src/crates/benchmarker/src/stack.rs
@@ -1,71 +1,79 @@
 use std::{any::type_name, collections::HashMap};
+use criterion::Criterion;
 
 use log::debug;
-use primrose_library::traits::Stack;
+use primrose_library::traits::{Container, Stack};
 use rand::{distributions::Standard, prelude::Distribution, random};
 
-use crate::{benchmark_op, Observation, Results};
-
-/// Benchmark [`primrose_library::traits::Stack`] operations
-pub trait StackExt<E> {
-    /// Benchmark at a single `n`.
-    fn benchmark_stack_at(n: usize) -> Results;
-
-    /// Benchmark `push` at a single `n`.
-    fn benchmark_stack_push(n: usize) -> Observation;
-
-    /// Benchmark `pop` at a single `n`.
-    fn benchmark_stack_pop(n: usize) -> Observation;
-}
-
-impl<T, E> StackExt<E> for T
+pub fn benchmark_stack<T, E>(c: &mut Criterion)
 where
     T: Stack<E> + Default,
+    E: Clone,
     Standard: Distribution<E>,
 {
-    fn benchmark_stack_at(n: usize) -> Results {
-        let mut by_op = HashMap::new();
+    todo!()
+}
 
-        debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
+// /// Benchmark [`primrose_library::traits::Stack`] operations
+// pub trait StackExt<E> {
+//     /// Benchmark at a single `n`.
+//     fn benchmark_stack_at(n: usize) -> Results;
 
-        debug!("...push");
-        by_op.insert("push".to_string(), vec![Self::benchmark_stack_push(n)]);
-        debug!("...pop");
-        by_op.insert("pop".to_string(), vec![Self::benchmark_stack_pop(n)]);
-        debug!("--- done!");
+//     /// Benchmark `push` at a single `n`.
+//     fn benchmark_stack_push(n: usize) -> Observation;
 
-        Results { by_op }
-    }
+//     /// Benchmark `pop` at a single `n`.
+//     fn benchmark_stack_pop(n: usize) -> Observation;
+// }
 
-    fn benchmark_stack_push(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.push(random());
-                    }
-                    c
-                },
-                |s| s.push(random()),
-            ),
-        )
-    }
+// impl<T, E> StackExt<E> for T
+// where
+//     T: Stack<E> + Default,
+//     Standard: Distribution<E>,
+// {
+//     fn benchmark_stack_at(n: usize) -> Results {
+//         let mut by_op = HashMap::new();
 
-    fn benchmark_stack_pop(n: usize) -> Observation {
-        (
-            n,
-            benchmark_op(
-                || {
-                    let mut c = T::default();
-                    for _ in 0..n {
-                        c.push(random());
-                    }
-                    c
-                },
-                |s| s.pop(),
-            ),
-        )
-    }
-}
+//         debug!("Benchmarking {} at n = {}", type_name::<T>(), n);
+
+//         debug!("...push");
+//         by_op.insert("push".to_string(), vec![Self::benchmark_stack_push(n)]);
+//         debug!("...pop");
+//         by_op.insert("pop".to_string(), vec![Self::benchmark_stack_pop(n)]);
+//         debug!("--- done!");
+
+//         Results { by_op }
+//     }
+
+//     fn benchmark_stack_push(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.push(random());
+//                     }
+//                     c
+//                 },
+//                 |s| s.push(random()),
+//             ),
+//         )
+//     }
+
+//     fn benchmark_stack_pop(n: usize) -> Observation {
+//         (
+//             n,
+//             benchmark_op(
+//                 || {
+//                     let mut c = T::default();
+//                     for _ in 0..n {
+//                         c.push(random());
+//                     }
+//                     c
+//                 },
+//                 |s| s.pop(),
+//             ),
+//         )
+//     }
+// }
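
The new `benchmark_indexable` and `benchmark_stack` entry points are left as `todo!()` stubs in this commit. One way they might later be filled in, mirroring the `(c: &mut Criterion, ns: &[usize])` signature and the `iter_batched_ref` pattern of `benchmark_container` above, is sketched here; the group name, the added `ns` parameter, and the choice to time a single `push` are assumptions for illustration, not part of the commit.

use criterion::{BatchSize, BenchmarkId, Criterion};
use primrose_library::traits::Stack;
use rand::{distributions::Standard, prelude::Distribution, random};

// Hypothetical fill-in for the `todo!()` stub: time one `push` onto a stack
// that already holds `n` random elements.
pub fn benchmark_stack<T, E>(c: &mut Criterion, ns: &[usize])
where
    T: Stack<E> + Default,
    E: Clone,
    Standard: Distribution<E>,
{
    let mut g = c.benchmark_group("push");
    for n in ns {
        g.bench_with_input(BenchmarkId::from_parameter(n), n, |b, &n| {
            b.iter_batched_ref(
                || {
                    // untimed setup: build a stack of size n
                    let mut s = T::default();
                    for _ in 0..n {
                        s.push(random());
                    }
                    s
                },
                |s| s.push(random()), // timed: one push at size n
                BatchSize::LargeInput,
            );
        });
    }
    g.finish();
}
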