aboutsummaryrefslogtreecommitdiff
path: root/src/crates/candelabra
diff options
context:
space:
mode:
authorAria <me@aria.rip>2024-01-17 13:00:49 +0000
committerAria <me@aria.rip>2024-01-17 13:00:49 +0000
commit86e515a1db17ca1c0bae8eeba8e1164dce1a8d13 (patch)
tree1405ed90e04019efb0506d98af9031654dda82bc /src/crates/candelabra
parent19617832ff12b74b60f673a3460256c2eea840f5 (diff)
fix(benchmarker): some cleanup and convenience
Diffstat (limited to 'src/crates/candelabra')
-rw-r--r--src/crates/candelabra/src/cost/benchmark.rs35
1 file changed, 20 insertions, 15 deletions
diff --git a/src/crates/candelabra/src/cost/benchmark.rs b/src/crates/candelabra/src/cost/benchmark.rs
index 1de8c07..9c7266d 100644
--- a/src/crates/candelabra/src/cost/benchmark.rs
+++ b/src/crates/candelabra/src/cost/benchmark.rs
@@ -1,5 +1,7 @@
//! Benchmarking of container types
+use std::io::{self, Read};
+use std::process::Stdio;
use std::str::FromStr;
use std::{
collections::HashMap,
@@ -10,7 +12,7 @@ use std::{
};
use anyhow::{bail, Context, Result};
-use log::{debug, log_enabled, Level};
+use log::debug;
use primrose::{LibSpec, LibSpecs};
use serde::{Deserialize, Serialize};
use tempfile::{tempdir, TempDir};
@@ -21,7 +23,7 @@ use crate::paths::Paths;
pub const ELEM_TYPE: &str = "usize";
/// String representation of the array of N values we use for benchmarking
-pub const NS: &str = "[8, 256, 1024, 65536]";
+pub const NS: &str = "[8, 256, 1024]";
/// Results for a whole suite of benchmarks
#[derive(Serialize, Deserialize, Debug, Clone)]
@@ -61,29 +63,32 @@ pub fn run_benchmarks(name: &str, paths: &Paths, lib_specs: &LibSpecs) -> Result
// Build and run
debug!("Building and running benchmarks for {}", name);
- let run_output = Command::new("cargo")
+ let mut child = Command::new("cargo")
.args(["run", "--release", "--", "--bench"])
.current_dir(crate_.path())
.env("CARGO_TARGET_DIR", &paths.target_dir) // Share target directory
- .output()
+ .stdout(Stdio::piped())
+ .spawn()
.context("Error running build command")?;
- if !run_output.status.success() {
- bail!("Error result from benchmark. Output: {:?}", run_output);
+ // tee the output to stdout and a vector
+ let mut stdout = child.stdout.take().unwrap();
+ let mut output = Vec::new();
+ let mut buf = vec![0; 100];
+ while let Ok(None) = child.try_wait() {
+ let n = stdout.read(&mut buf)?;
+ let read = &buf[0..n];
+ io::stdout().write_all(read)?;
+ output.extend(read);
}
- if log_enabled!(Level::Debug) {
- if let Ok(stdout) = String::from_utf8(run_output.stdout.clone()) {
- debug!("stdout: {:?}", stdout);
- }
- if let Ok(stderr) = String::from_utf8(run_output.stderr.clone()) {
- debug!("stderr: {:?}", stderr);
- }
+ let run_output = child.try_wait().unwrap().unwrap();
+ if !run_output.success() {
+ bail!("Error result from benchmark. Output: {:?}", run_output);
}
// Deserialise benchmark results
- let output =
- String::from_utf8(run_output.stdout).context("Error interpreting output as UTF-8")?;
+ let output = String::from_utf8(output).context("Error interpreting output as UTF-8")?;
let measurements = output
.lines()
.flat_map(|l| {