296 fix benchmark build errors #297

Merged · 5 commits · Dec 18, 2023
5 changes: 5 additions & 0 deletions .github/workflows/build.yml
@@ -29,6 +29,11 @@ jobs:
with:
command: build
args: --features async-io-rio
- name: Build benchmarks
uses: actions-rs/cargo@v1
with:
command: build
args: --features benchmark

build-windows:
runs-on: windows-latest
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -10,6 +10,7 @@ Pearl changelog


#### Fixed
- Fixed benchmark build error (#296)
- Fixed build warnings (#300)
- Fix the build by adding yanked aHash implementation (#302)

3 changes: 2 additions & 1 deletion Cargo.toml
@@ -47,13 +47,14 @@ async-lock = "2.7"
# Benchmark only dependencies
clap = { version = "3.2", optional = true }
env_logger = { version = "0.9", optional = true }
rand = { version = "0.8", optional = true }

[dependencies.tokio]
version = "1.28"
features = ["fs", "io-util", "sync", "time", "rt", "macros", "rt-multi-thread"]

[features]
benchmark = ["dep:clap", "dep:env_logger"]
benchmark = ["dep:clap", "dep:env_logger", "dep:rand"]
async-io-rio = ["dep:rio"]

[lib]
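The newly optional `rand` dependency feeds the benchmark's key/value generator and matches the `ThreadRng`/`RngCore` import added to the prelude below. A minimal, self-contained sketch of that usage with rand 0.8 (the `generate_pair` helper is hypothetical, not part of this PR):

```rust
use rand::{rngs::ThreadRng, RngCore};

// Hypothetical helper: an 8-byte key (Key128::LEN == 8 in this PR) plus
// `value_size` random payload bytes, drawn from the thread-local RNG.
fn generate_pair(rng: &mut ThreadRng, value_size: usize) -> (Vec<u8>, Vec<u8>) {
    let key = rng.next_u64().to_be_bytes().to_vec();
    let mut value = vec![0u8; value_size];
    rng.fill_bytes(&mut value);
    (key, value)
}

fn main() {
    let mut rng = rand::thread_rng();
    let (key, value) = generate_pair(&mut rng, 1024);
    assert_eq!(key.len(), 8);
    assert_eq!(value.len(), 1024);
}
```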
117 changes: 68 additions & 49 deletions src/benchmark/bin.rs
@@ -23,13 +23,12 @@ mod prelude {
stream::{FuturesUnordered, StreamExt},
};
pub(crate) use log::{Level, LevelFilter};
pub(crate) use pearl::{Builder, Key, Storage};
pub(crate) use pearl::{BlobRecordTimestamp, Builder, Key, RefKey, Storage};
pub(crate) use rand::{rngs::ThreadRng, RngCore};
pub(crate) use std::{
io::Write,
ops::Add,
path::{Path, PathBuf},
sync::Arc,
time::{Duration, Instant},
};
}
@@ -73,62 +72,62 @@ async fn start_app() {

info!("Start write cycle");
let (tx, rx) = channel::<statistics::Report>(1024);
let writer = Arc::new(writer);
let mut counter = 0;

let futures_limit: usize = matches.value_of("futures_limit").unwrap().parse().unwrap();

let prepared = (0..futures_limit).map(|_| generator.next().unwrap());

let mut futures_pool: FuturesUnordered<_> = prepared
.into_iter()
.map(|(key, data)| {
let ltx = tx.clone();
counter += 1;
writer.write(key, data, ltx)
})
.collect();
println!(
"{:<10}{:<10}{:<10}{:<10}{:<10}",
"Completed", "Active", "Limit", "Total", "%"
);
let write_limit = limit * 1000 / value_size_kb;
let mut prev_p = 0;
while futures_pool.next().await.is_some() {
debug!("#{}/{} future ready", counter, futures_pool.len());
let percent = counter * 1000 / write_limit;
if prev_p != percent {
print!(
"\r{:<10}{:<10}{:<10}{:<10}{:<10}",
counter,
futures_pool.len(),
futures_limit,
write_limit,
percent / 10
);
if percent % 50 == 0 {
println!();
}
}
prev_p = percent;
if futures_pool.len() < futures_limit {
if let Some((key, data)) = generator.next() {
{
let mut futures_pool: FuturesUnordered<_> = prepared
.into_iter()
.map(|(key, data)| {
let ltx = tx.clone();
counter += 1;
futures_pool.push(writer.write(key.into(), data, ltx));
writer.write(key, data, ltx)
})
.collect();
println!(
"{:<10}{:<10}{:<10}{:<10}{:<10}",
"Completed", "Active", "Limit", "Total", "%"
);
let write_limit = limit * 1000 / value_size_kb;
let mut prev_p = 0;
while futures_pool.next().await.is_some() {
debug!("#{}/{} future ready", counter, futures_pool.len());
let percent = counter * 1000 / write_limit;
if prev_p != percent {
print!(
"\r{:<10}{:<10}{:<10}{:<10}{:<10}",
counter,
futures_pool.len(),
futures_limit,
write_limit,
percent / 10
);
if percent % 50 == 0 {
println!();
}
}
prev_p = percent;
if futures_pool.len() < futures_limit {
if let Some((key, data)) = generator.next() {
let ltx = tx.clone();
counter += 1;
futures_pool.push(writer.write(key.into(), data, ltx));
}
}
debug!("#{}/{} next await", counter, futures_pool.len());
}
debug!("#{}/{} next await", counter, futures_pool.len());
}

info!("start await ");
let _ = rx
.take(counter as usize)
.map(|r| statistics.add(r))
.collect::<Vec<_>>()
.await;
info!("end await ");
statistics.display();
info!("start await ");
let _ = rx
.take(counter as usize)
.map(|r| statistics.add(r))
.collect::<Vec<_>>()
.await;
info!("end await ");
statistics.display();
}
writer.close().await;
}

@@ -206,8 +205,21 @@ fn init_logger() {
#[derive(Debug, Default, PartialOrd, Ord, PartialEq, Eq, Clone)]
pub struct Key128(Vec<u8>);

impl Key for Key128 {
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct RefKeyType<'a>(&'a [u8]);

impl<'a> From<&'a [u8]> for RefKeyType<'a> {
fn from(v: &'a [u8]) -> Self {
Self(v)
}
}

impl<'a> RefKey<'a> for RefKeyType<'a> {}

impl<'a> Key<'a> for Key128 {
const LEN: u16 = 8;
const MEM_SIZE: usize = 16 * 8;
type Ref = RefKeyType<'a>;
}

impl AsRef<Key128> for Key128 {
@@ -223,6 +235,13 @@ impl From<Vec<u8>> for Key128 {
}
}

impl<'a> From<&'a [u8]> for Key128 {
fn from(v: &'a [u8]) -> Self {
assert_eq!(Self::LEN as usize, v.len());
Self(v.to_vec())
}
}

impl AsRef<[u8]> for Key128 {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
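The `Key` trait in the pearl version targeted here is lifetime-parameterized and carries an associated borrowed key type, which is why `Key128` gains a `RefKeyType` companion above. A standalone sketch of that owned/borrowed key pairing, using hypothetical `OwnedKey`/`BorrowedKey` traits in place of pearl's `Key`/`RefKey` (no pearl dependency):

```rust
// Hypothetical stand-ins for pearl's `Key<'a>` / `RefKey<'a>`; only the shape matters here.
trait BorrowedKey<'a>: From<&'a [u8]> {}

trait OwnedKey<'a>: From<Vec<u8>> {
    const LEN: u16;
    type Ref: BorrowedKey<'a>;
}

#[derive(Debug)]
struct Key128(Vec<u8>);

struct RefKeyType<'a>(&'a [u8]);

impl<'a> From<&'a [u8]> for RefKeyType<'a> {
    fn from(v: &'a [u8]) -> Self {
        Self(v)
    }
}

impl<'a> BorrowedKey<'a> for RefKeyType<'a> {}

impl From<Vec<u8>> for Key128 {
    fn from(v: Vec<u8>) -> Self {
        Self(v)
    }
}

impl<'a> OwnedKey<'a> for Key128 {
    const LEN: u16 = 8;
    type Ref = RefKeyType<'a>;
}

fn main() {
    // The owned key holds its bytes; the borrowed form just wraps a slice.
    let owned = Key128::from(42u64.to_be_bytes().to_vec());
    let bytes: &[u8] = &owned.0;
    let borrowed = RefKeyType::from(bytes);
    println!("owned = {:?}, borrowed len = {}", owned, borrowed.0.len());
}
```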
20 changes: 11 additions & 9 deletions src/benchmark/writer.rs
@@ -1,11 +1,10 @@
use super::prelude::*;
use pearl::rio;

pub struct Writer<K: Key + 'static> {
pub struct Writer<K: for<'a> Key<'a> + 'static> {
storage: Storage<K>,
}

impl<K: Key> Writer<K> {
impl<K: for<'a> Key<'a>> Writer<K> {
pub fn new(
tmp_dir: &Path,
max_blob_size: u64,
@@ -16,14 +15,14 @@ impl<K: Key> Writer<K> {
.blob_file_name_prefix("benchmark")
.max_blob_size(max_blob_size)
.max_data_in_blob(max_data_in_blob)
.work_dir(tmp_dir.join("pearl_benchmark"));
.work_dir(tmp_dir.join("pearl_benchmark"))
.set_io_driver(pearl::IoDriver::new());
if allow_duplicates {
info!("duplicates allowed");
builder = builder.allow_duplicates();
}

let rio = rio::new().unwrap();
let storage = builder.enable_aio(rio).build().unwrap();
let storage = builder.build().unwrap();
Self { storage }
}

@@ -35,14 +34,17 @@ impl<K: Key> Writer<K> {
let kbuf: &[u8] = key.as_ref().as_ref();
let mut report = Report::new(kbuf.len(), data.len());
let now = Instant::now();
self.storage.write(key, data).await.unwrap();
self.storage
.write(key, data.into(), BlobRecordTimestamp::now())
.await
.unwrap();
debug!("write finished");
report.set_latency(now);
tx.try_send(report).unwrap();
debug!("report sent");
}

pub async fn close(&self) {
self.storage.clone().close().await.unwrap();
pub async fn close(self) {
self.storage.close().await.unwrap();
}
}
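One behavioral detail worth noting in the last hunk: `close` now takes `self` by value instead of cloning the storage behind `&self`, so the writer cannot be used after shutdown. A standalone sketch of that consuming-close pattern (hypothetical `Resource` type, tokio only, not pearl's API):

```rust
// Hypothetical types illustrating `close(self)`: ownership moves into close(),
// so a second use of the writer is a compile-time error rather than a runtime one.
struct Resource;

impl Resource {
    async fn close(self) {
        println!("resource closed");
    }
}

struct Writer {
    storage: Resource,
}

impl Writer {
    async fn close(self) {
        // No clone needed: `self.storage` is moved out and shut down exactly once.
        self.storage.close().await;
    }
}

#[tokio::main]
async fn main() {
    let writer = Writer { storage: Resource };
    writer.close().await;
    // writer.close().await; // would not compile: `writer` was moved above
}
```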