Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Uses nanosecond precision for timestamps when submitting metrics to InfluxDB (backport #20623) #20659

Merged
merged 1 commit on
Oct 13, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 13 additions & 8 deletions metrics/src/counter.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,13 @@
use crate::metrics::submit_counter;
use log::*;
use solana_sdk::timing;
use std::env;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use {
crate::metrics::submit_counter,
log::*,
solana_sdk::timing,
std::{
env,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
time::SystemTime,
},
};

const DEFAULT_LOG_RATE: usize = 1000;
// Submit a datapoint every second by default
Expand All @@ -23,7 +28,7 @@ pub struct Counter {
pub struct CounterPoint {
pub name: &'static str,
pub count: i64,
pub timestamp: u64,
pub timestamp: SystemTime,
}

impl CounterPoint {
Expand All @@ -32,7 +37,7 @@ impl CounterPoint {
CounterPoint {
name,
count: 0,
timestamp: 0,
timestamp: std::time::UNIX_EPOCH,
}
}
}
Expand Down Expand Up @@ -198,7 +203,7 @@ impl Counter {
let counter = CounterPoint {
name: self.name,
count: counts as i64 - lastlog as i64,
timestamp: now,
timestamp: SystemTime::now(),
};
submit_counter(counter, level, bucket);
}
Expand Down
6 changes: 3 additions & 3 deletions metrics/src/datapoint.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
use std::fmt;
use std::{fmt, time::SystemTime};

#[derive(Clone, Debug)]
pub struct DataPoint {
pub name: &'static str,
pub timestamp: u64,
pub timestamp: SystemTime,
pub fields: Vec<(&'static str, String)>,
}

impl DataPoint {
pub fn new(name: &'static str) -> Self {
DataPoint {
name,
timestamp: solana_sdk::timing::timestamp(),
timestamp: SystemTime::now(),
fields: vec![],
}
}
Expand Down
40 changes: 22 additions & 18 deletions metrics/src/metrics.rs
Original file line number Diff line number Diff line change
@@ -1,20 +1,23 @@
//! The `metrics` module enables sending measurements to an `InfluxDB` instance

use crate::{counter::CounterPoint, datapoint::DataPoint};
use gethostname::gethostname;
use lazy_static::lazy_static;
use log::*;
use solana_sdk::hash::hash;
use std::{
collections::HashMap,
convert::Into,
sync::{
mpsc::{channel, Receiver, RecvTimeoutError, Sender},
Arc, Barrier, Mutex, Once, RwLock,
use {
crate::{counter::CounterPoint, datapoint::DataPoint},
gethostname::gethostname,
lazy_static::lazy_static,
log::*,
solana_sdk::hash::hash,
std::{
cmp,
collections::HashMap,
convert::Into,
env,
sync::{
mpsc::{channel, Receiver, RecvTimeoutError, Sender},
Arc, Barrier, Mutex, Once, RwLock,
},
thread,
time::{Duration, Instant, UNIX_EPOCH},
},
thread,
time::{Duration, Instant},
{cmp, env},
};

type CounterMap = HashMap<(&'static str, u64), CounterPoint>;
Expand Down Expand Up @@ -68,7 +71,7 @@ impl InfluxDbMetricsWriter {
);

let write_url = format!(
"{}/write?db={}&u={}&p={}&precision=ms",
"{}/write?db={}&u={}&p={}&precision=n",
&config.host, &config.db, &config.username, &config.password
);

Expand Down Expand Up @@ -97,8 +100,9 @@ impl MetricsWriter for InfluxDbMetricsWriter {
));
first = false;
}

line.push_str(&format!(" {}\n", &point.timestamp));
let timestamp = point.timestamp.duration_since(UNIX_EPOCH);
let nanos = timestamp.unwrap().as_nanos();
line.push_str(&format!(" {}\n", nanos));
}

let client = reqwest::blocking::Client::builder()
Expand Down Expand Up @@ -537,7 +541,7 @@ mod test {
CounterPoint {
name: "counter",
count: 10,
timestamp: 0,
timestamp: UNIX_EPOCH,
},
Level::Info,
0, // use the same bucket
Expand Down