author     Aria Shrimpton <me@aria.rip>    2024-01-19 23:42:38 +0000
committer  Aria Shrimpton <me@aria.rip>    2024-01-19 23:42:38 +0000
commit     8ab9e6779d714aa3f9bfc769a36a9eae7d61c0f9
tree       550407157e63e1b5a1e8e582a7a9ddf4b723284b
parent     0490d79107fd2b732054c2c59d534a8cf5026011
feat(cli): compare real results against chosen assignment
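
The new `--compare` switch on the `select` subcommand brute-forces every candidate container assignment, benchmarks the project under each one, and prints the measured averages alongside the assignment the tool chose. A minimal sketch of that flow, mirroring the `select.rs` changes in the patch below; the `compare_assignments` helper, the `state`/`proj` bindings, and the import paths from the `candelabra` crate root are illustrative assumptions, not part of this commit:

    use std::collections::HashMap;

    use anyhow::Result;
    use candelabra::{Project, State}; // assumed re-exports from the crate root
    use primrose::tools::nary_cartesian_product;

    // Illustrative sketch: benchmark a project once per candidate assignment.
    fn compare_assignments(state: &State, proj: &Project) -> Result<()> {
        // Every (container type -> implementation) combination for this project
        let candidates = state
            .project_candidate_list(proj)?
            .into_iter()
            .flat_map(|(_, v)| v.into_iter())
            .collect();
        let possible_assignments = nary_cartesian_product(&candidates);

        // Run the project benchmarks with each assignment applied, keeping the parsed results
        let mut results = HashMap::new();
        for assignment in possible_assignments.iter() {
            results.insert(
                format!("{:?}", assignment),
                state.run_benchmarks_with(proj, assignment)?,
            );
        }

        // ...then tabulate `results` against the assignment the cost models picked
        Ok(())
    }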
-rw-r--r--   src/crates/candelabra/src/confirmation.rs    | 100
-rw-r--r--   src/crates/candelabra/src/cost/benchmark.rs   |  55
-rw-r--r--   src/crates/candelabra/src/cost/mod.rs         |   2
-rw-r--r--   src/crates/candelabra/src/lib.rs              |   1
-rw-r--r--   src/crates/candelabra/src/profiler.rs         |  17
-rw-r--r--   src/crates/cli/Cargo.toml                     |   1
-rw-r--r--   src/crates/cli/src/select.rs                  |  66
-rw-r--r--   src/tests/Cargo.toml                          |   5
-rw-r--r--   src/tests/example_stack/Cargo.toml            |   5
-rw-r--r--   src/tests/example_stack/benches/do_stuff.rs   |  12
10 files changed, 227 insertions, 37 deletions
diff --git a/src/crates/candelabra/src/confirmation.rs b/src/crates/candelabra/src/confirmation.rs
new file mode 100644
index 0000000..8949f56
--- /dev/null
+++ b/src/crates/candelabra/src/confirmation.rs
@@ -0,0 +1,100 @@
+use anyhow::{Context, Result};
+use camino::Utf8Path;
+use log::{debug, trace};
+use primrose::ContainerSelector;
+use std::{
+    collections::HashMap,
+    fs::File,
+    io::Write,
+    process::{Command, Stdio},
+};
+
+use crate::{
+    candidates::{ConTypeName, ImplName},
+    cost::{
+        benchmark::{parse_criterion_output, tee_output},
+        BenchmarkResult,
+    },
+    Project, State,
+};
+
+impl State {
+    /// Run all benchmarks for the given project with the given container selections, returning the results
+    pub fn run_benchmarks_with(
+        &self,
+        proj: &Project,
+        selections: &HashMap<&ConTypeName, &ImplName>,
+    ) -> Result<HashMap<String, BenchmarkResult>> {
+        self.save_choices(proj, selections)
+            .context("error setting up project")?;
+
+        self.run_benchmarks(proj)
+            .context("error running benchmarks")
+    }
+
+    /// Run all benchmarks in the given project, returning their results.
+    /// This can only parse criterion-formatted output
+    pub fn run_benchmarks(&self, proj: &Project) -> Result<HashMap<String, BenchmarkResult>> {
+        let child = Command::new("cargo")
+            .args(["bench"])
+            .current_dir(proj.source_dir.as_std_path())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .spawn()
+            .context("Error running benchmark command")?;
+
+        let output = tee_output(child)?;
+        Ok(parse_criterion_output(&output)
+            .map(|(name, result)| (name.to_string(), result))
+            .collect())
+    }
+
+    /// Use the given selections for the container types in this project.
+    /// Panics if a container type has not been selected.
+    pub fn save_choices(
+        &self,
+        proj: &Project,
+        selections: &HashMap<&ConTypeName, &ImplName>,
+    ) -> Result<()> {
+        debug!("Saving choices for project {}", proj.name);
+        for (file, file_con_types) in self.project_candidate_list(proj)?.iter() {
+            // FIXME: this will cause type names to collide across files - need to rework this
+            let chosen = selections
+                .iter()
+                .filter(|(ctn, _)| file_con_types.iter().any(|(ctn2, _)| **ctn == ctn2))
+                .map(|(ctn, can)| (*ctn, can.as_str()));
+
+            self.save_choices_file(file, chosen)
+                .with_context(|| format!("error setting up {}", file))?;
+        }
+
+        Ok(())
+    }
+
+    /// Use the given selections for the container types in this file.
+    /// Panics if a container type has not been selected, or a choice is given for a container type not in this file.
+    fn save_choices_file<'a>(
+        &self,
+        file: &Utf8Path,
+        choices: impl Iterator<Item = (&'a String, &'a str)>,
+    ) -> Result<()> {
+        debug!("Saving choices for {}", file);
+        let selector = ContainerSelector::from_path(
+            file.as_std_path(),
+            self.paths.library_src.as_std_path(),
+            self.model_size,
+        )?;
+
+        let new_code = selector.gen_replacement_file(choices);
+        let new_path = file.to_string().replace(".pr", "");
+
+        trace!("New code: {}", new_code);
+        trace!("New path: {}", new_path);
+
+        let mut f = File::create(new_path).context("error creating new source file")?;
+        f.write_all(new_code.as_bytes())
+            .context("error writing new code")?;
+
+        Ok(())
+    }
+}
diff --git a/src/crates/candelabra/src/cost/benchmark.rs b/src/crates/candelabra/src/cost/benchmark.rs
index 25d0d37..2d3470a 100644
--- a/src/crates/candelabra/src/cost/benchmark.rs
+++ b/src/crates/candelabra/src/cost/benchmark.rs
@@ -1,7 +1,7 @@
 //! Benchmarking of container types
 
 use std::io::{self, Read};
-use std::process::Stdio;
+use std::process::{Child, Stdio};
 use std::str::FromStr;
 use std::{
     collections::HashMap,
@@ -12,7 +12,7 @@ use std::{
 };
 
 use anyhow::{bail, Context, Result};
-use log::debug;
+use log::{debug, log_enabled, Level};
 use primrose::{LibSpec, LibSpecs};
 use serde::{Deserialize, Serialize};
 use tempfile::{tempdir, TempDir};
@@ -66,14 +66,34 @@ pub fn run_benchmarks(name: &str, paths: &Paths, lib_specs: &LibSpecs) -> Result
     // Build and run
     debug!("Building and running benchmarks for {}", name);
 
-    let mut child = Command::new("cargo")
+    let child = Command::new("cargo")
         .args(["run", "--release", "--", "--bench"])
         .current_dir(crate_.path())
        .env("CARGO_TARGET_DIR", &paths.target_dir) // Share target directory
         .stdout(Stdio::piped())
+        .stderr(Stdio::null())
         .spawn()
         .context("Error running build command")?;
 
+    // Deserialise benchmark results
+    let output = tee_output(child)?;
+    let measurements = parse_criterion_output(&output).flat_map(|(name, result)| {
+        let (op, n) = name.trim().split_once('/')?;
+        Some((op, usize::from_str(n).ok()?, result))
+    });
+
+    let mut by_op = HashMap::new();
+    for (op, n, result) in measurements {
+        by_op
+            .entry(op.to_string())
+            .and_modify(|v: &mut Vec<(usize, BenchmarkResult)>| v.push((n, result.clone())))
+            .or_insert(vec![(n, result)]);
+    }
+
+    Ok(Results { by_op })
+}
+
+pub(crate) fn tee_output(mut child: Child) -> Result<String> {
     // tee the output to stdout and a vector
     let mut stdout = child.stdout.take().unwrap();
     let mut output = Vec::new();
@@ -81,7 +101,9 @@ pub fn run_benchmarks(name: &str, paths: &Paths, lib_specs: &LibSpecs) -> Result
     while let Ok(None) = child.try_wait() {
         let n = stdout.read(&mut buf)?;
         let read = &buf[0..n];
-        io::stdout().write_all(read)?;
+        if log_enabled!(Level::Debug) {
+            io::stdout().write_all(read)?;
+        }
         output.extend(read);
     }
 
@@ -91,8 +113,13 @@ pub fn run_benchmarks(name: &str, paths: &Paths, lib_specs: &LibSpecs) -> Result
     }
 
     // Deserialise benchmark results
-    let output = String::from_utf8(output).context("Error interpreting output as UTF-8")?;
-    let measurements = output
+    Ok(String::from_utf8(output).context("Error interpreting output as UTF-8")?)
+}
+
+pub(crate) fn parse_criterion_output(
+    output: &str,
+) -> impl Iterator<Item = (&str, BenchmarkResult)> {
+    output
         .lines()
         .flat_map(|l| {
             // looking for lines like:
@@ -113,20 +140,8 @@ pub fn run_benchmarks(name: &str, paths: &Paths, lib_specs: &LibSpecs) -> Result
                 max: parse_time_str(timings.next()?, timings.next()?)?,
             };
 
-            let (op, n) = name.trim().split_once('/')?;
-
-            Some((op, usize::from_str(n).ok()?, result))
-        });
-
-    let mut by_op = HashMap::new();
-    for (op, n, result) in measurements {
-        by_op
-            .entry(op.to_string())
-            .and_modify(|v: &mut Vec<(usize, BenchmarkResult)>| v.push((n, result.clone())))
-            .or_insert(vec![(n, result)]);
-    }
-
-    Ok(Results { by_op })
+            Some((name, result))
+        })
 }
 
 fn parse_time_str(quantity: &str, suffix: &str) -> Option<Duration> {
diff --git a/src/crates/candelabra/src/cost/mod.rs b/src/crates/candelabra/src/cost/mod.rs
index f29227f..bbe4942 100644
--- a/src/crates/candelabra/src/cost/mod.rs
+++ b/src/crates/candelabra/src/cost/mod.rs
@@ -1,5 +1,5 @@
 //! Generating, caching, and using cost models
 
-mod benchmark;
+pub(crate) mod benchmark;
 mod fit;
 pub use benchmark::{BenchmarkResult, Results as BenchmarkResults};
diff --git a/src/crates/candelabra/src/lib.rs b/src/crates/candelabra/src/lib.rs
index 1aa62d1..19445f0 100644
--- a/src/crates/candelabra/src/lib.rs
+++ b/src/crates/candelabra/src/lib.rs
@@ -7,6 +7,7 @@ extern crate nalgebra as na;
 
 mod cache;
 pub mod candidates;
+mod confirmation;
 pub mod cost;
 pub mod profiler;
 mod select;
diff --git a/src/crates/candelabra/src/profiler.rs b/src/crates/candelabra/src/profiler.rs
index 6984297..3897d97 100644
--- a/src/crates/candelabra/src/profiler.rs
+++ b/src/crates/candelabra/src/profiler.rs
@@ -2,7 +2,7 @@
 
 use anyhow::{anyhow, bail, Context, Result};
 use camino::{Utf8Path, Utf8PathBuf};
-use log::{debug, trace, warn};
+use log::{debug, log_enabled, trace, warn, Level};
 use primrose::ContainerSelector;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
@@ -221,13 +221,18 @@ impl State {
             name, profiler_out_dir
         );
 
-        let output = Command::new("cargo")
+        let mut command = Command::new("cargo");
+        command
             .current_dir(&project.source_dir)
             .args(["bench", "--bench", name])
-            .env("PROFILER_OUT_DIR", profiler_out_dir.as_ref()) // Where profiler info gets outputted
-            .stderr(Stdio::inherit())
-            .stdout(Stdio::inherit())
-            .output()?;
+            .env("PROFILER_OUT_DIR", profiler_out_dir.as_ref()); // Where profiler info gets outputted
+
+        if log_enabled!(Level::Debug) {
+            command.stderr(Stdio::inherit()).stdout(Stdio::inherit());
+        } else {
+            command.stderr(Stdio::null()).stdout(Stdio::null());
+        };
+        let output = command.output()?;
 
         if !output.status.success() {
             bail!("Error running benchmark");
diff --git a/src/crates/cli/Cargo.toml b/src/crates/cli/Cargo.toml
index 95fb5cc..09d7b4b 100644
--- a/src/crates/cli/Cargo.toml
+++ b/src/crates/cli/Cargo.toml
@@ -8,6 +8,7 @@ default-run = "candelabra-cli"
 
 [dependencies]
 candelabra = { path = "../candelabra" }
+primrose = { path = "../primrose" }
 log = { workspace = true }
 anyhow = { workspace = true }
 env_logger = { workspace = true }
diff --git a/src/crates/cli/src/select.rs b/src/crates/cli/src/select.rs
index 3ef43cd..5b9b755 100644
--- a/src/crates/cli/src/select.rs
+++ b/src/crates/cli/src/select.rs
@@ -1,6 +1,9 @@
+use std::collections::HashMap;
+
 use anyhow::Result;
 use argh::FromArgs;
 use log::info;
+use primrose::tools::nary_cartesian_product;
 use tabled::{builder::Builder, settings::Style};
 
 use crate::State;
@@ -8,10 +11,14 @@ use crate::State;
 /// Select an implementation for each container type and project
 #[derive(FromArgs)]
 #[argh(subcommand, name = "select")]
-pub struct Args {}
+pub struct Args {
+    /// run project benchmarks with each candidate and compare estimated vs real position
+    #[argh(switch)]
+    compare: bool,
+}
 
 impl State {
-    pub fn cmd_select(&self, _: Args) -> Result<()> {
+    pub fn cmd_select(&self, a: Args) -> Result<()> {
         for proj in self.projects.iter() {
             info!("Processing project {}", &proj.name);
 
@@ -29,7 +36,60 @@ impl State {
                 }
             }
 
-            println!("{}", builder.build().with(Style::sharp()))
+            println!("{}", builder.build().with(Style::sharp()));
+
+            if !a.compare {
+                continue;
+            }
+
+            // 'brute force' and time all possibilities
+            let candidates = self
+                .inner
+                .project_candidate_list(proj)?
+                .into_iter()
+                .flat_map(|(_, v)| v.into_iter())
+                .collect();
+            let possible_assignments = nary_cartesian_product(&candidates);
+            let mut assignments_results = HashMap::new();
+            for assignment in possible_assignments.iter() {
+                info!("Running benchmarks with {:?}", &assignment);
+                assignments_results.insert(
+                    format!("{:?}", &assignment),
+                    self.inner.run_benchmarks_with(proj, assignment)?,
+                );
+            }
+
+            let mut builder = Builder::default();
+
+            if assignments_results.is_empty() {
+                continue;
+            }
+
+            let header = {
+                let mut header = vec!["assignment"];
+                assignments_results
+                    .iter()
+                    .next()
+                    .unwrap()
+                    .1
+                    .keys()
+                    .for_each(|k| header.push(k));
+                header
+            };
+            builder.set_header(header.clone());
+
+            for (assignment, benchmark_results) in assignments_results.iter() {
+                let mut record = vec![assignment.to_string()];
+                for i in 1..header.len() {
+                    record.push(format!(
+                        "{:?}",
+                        benchmark_results.get(header[i]).unwrap().avg
+                    ));
+                }
+                builder.push_record(record);
+            }
+
+            println!("{}", builder.build().with(Style::sharp()));
         }
         Ok(())
     }
diff --git a/src/tests/Cargo.toml b/src/tests/Cargo.toml
index d4d9ab2..b40694f 100644
--- a/src/tests/Cargo.toml
+++ b/src/tests/Cargo.toml
@@ -2,4 +2,7 @@
 resolver = "2"
 members = [
     "example_stack",
-]
\ No newline at end of file
+]
+
+[workspace.dependencies]
+criterion = "0.3"
diff --git a/src/tests/example_stack/Cargo.toml b/src/tests/example_stack/Cargo.toml
index 3e3e617..55e4338 100644
--- a/src/tests/example_stack/Cargo.toml
+++ b/src/tests/example_stack/Cargo.toml
@@ -8,6 +8,9 @@ edition = "2021"
 [dependencies]
 primrose-library = { path = "../../crates/library" }
 
+[dev-dependencies]
+criterion = { workspace = true }
+
 [[bench]]
 name = "do_stuff"
-harness = false
\ No newline at end of file
+harness = false
diff --git a/src/tests/example_stack/benches/do_stuff.rs b/src/tests/example_stack/benches/do_stuff.rs
index 69d50d5..c7e2e23 100644
--- a/src/tests/example_stack/benches/do_stuff.rs
+++ b/src/tests/example_stack/benches/do_stuff.rs
@@ -1,6 +1,8 @@
-fn main() {
-    // TODO: some actual benchmarking, not just a random loop
-    for _ in 0..20 {
-        example_stack::do_something();
-    }
+use criterion::{criterion_group, criterion_main, Criterion};
+
+fn run_benches(c: &mut Criterion) {
+    c.bench_function("do_something", |b| b.iter(|| example_stack::do_something()));
 }
+
+criterion_group!(benches, run_benches);
+criterion_main!(benches);