From 59caeec2d65d1da81909f82ccb3d6a44e6dffa9a Mon Sep 17 00:00:00 2001 From: technillogue Date: Sat, 29 Jul 2023 16:46:04 -0400 Subject: [PATCH] make connection pool size configurable and add throughput logging --- storage/src/backend/connection.rs | 9 +++++++-- storage/src/cache/mod.rs | 15 +++++++++++---- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/storage/src/backend/connection.rs b/storage/src/backend/connection.rs index d9a755e26c8..b8068a5d5c0 100644 --- a/storage/src/backend/connection.rs +++ b/storage/src/backend/connection.rs @@ -10,7 +10,7 @@ use std::str::FromStr; use std::sync::atomic::{AtomicBool, AtomicI16, AtomicU8, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use std::{fmt, thread}; +use std::{env, fmt, thread}; use log::{max_level, Level}; @@ -626,6 +626,11 @@ impl Connection { } else { None }; + // get pool size from envvar + let pool_max_idle_per_host = match env::var("REGISTRY_CLIENT_POOL_MAX_IDLE_PER_HOST") { + Ok(val) => val.parse::<usize>().unwrap_or(20), + Err(_) => 20, + }; let mut cb = Client::builder() .timeout(timeout) @@ -633,7 +638,7 @@ .redirect(Policy::none()) .use_rustls_tls() .tcp_keepalive(Some(Duration::from_secs(5 * 60))) - .pool_max_idle_per_host(20); + .pool_max_idle_per_host(pool_max_idle_per_host); if config.skip_verify { cb = cb.danger_accept_invalid_certs(true); diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index cc65f842919..d9b5eaae5ea 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -270,13 +270,17 @@ pub trait BlobCache: Send + Sync { ))); } let duration = Instant::now().duration_since(start).as_millis(); + let duration_s = duration as f64 / 1000.0; + let throughput_mbps = blob_size as f64 * 8.0 / duration_s / 1_000_000.0; + debug!( - "read_chunks_from_backend: {} {} {} bytes at {}, duration {}ms", + "read_chunks_from_backend: {} {} {} bytes at {}, duration {}ms, throughput {:.4}Mbps", 
std::thread::current().name().unwrap_or_default(), if prefetch { "prefetch" } else { "fetch" }, blob_size, blob_offset, - duration + duration, + throughput_mbps ); let chunks = chunks.iter().map(|v| v.as_ref()).collect(); @@ -328,12 +332,15 @@ } let duration = Instant::now().duration_since(start).as_millis(); + let duration_s = duration as f64 / 1000.0; + let throughput_mbps = chunk.compressed_size() as f64 * 8.0 / duration_s / 1_000_000.0; debug!( - "read_chunk_from_backend: {} {} bytes at {}, duration {}ms", + "read_chunk_from_backend: {} {} bytes at {}, duration {}ms, throughput {:.4}Mbps", std::thread::current().name().unwrap_or_default(), chunk.compressed_size(), chunk.compressed_offset(), - duration + duration, + throughput_mbps ); self.validate_chunk_data(chunk, buffer, false) .map_err(|e| {