From 7849d804cf2bf73c2f3e77e03c8b7af73aaa1a06 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 1 Sep 2023 10:32:37 -0400 Subject: [PATCH] chore(deps): Bump to Rust 1.72.0 (#18389) * chore(deps): Bump to Rust 1.72.0 Signed-off-by: Jesse Szwedko * clippy --fix Signed-off-by: Jesse Szwedko * clippy Signed-off-by: Jesse Szwedko * clippy Signed-off-by: Jesse Szwedko * Add panic documentation Signed-off-by: Jesse Szwedko * Spelling Signed-off-by: Jesse Szwedko * Add clippy allow for arc_with_non_send_sync Signed-off-by: Jesse Szwedko * additional clippy allow Signed-off-by: Jesse Szwedko * Correct clippy name Signed-off-by: Jesse Szwedko * cargo fmt Signed-off-by: Jesse Szwedko * Move clippy allow up Couldn't figure out how to get it working when applying directly to structs with derivative Signed-off-by: Jesse Szwedko * Update lib/vector-buffers/src/topology/acks.rs Co-authored-by: Doug Smith --------- Signed-off-by: Jesse Szwedko Co-authored-by: Doug Smith --- Tiltfile | 2 +- lib/prometheus-parser/src/lib.rs | 8 +- lib/prometheus-parser/src/line.rs | 8 +- lib/vector-buffers/src/test/helpers.rs | 5 ++ lib/vector-buffers/src/test/messages.rs | 1 + lib/vector-buffers/src/test/variant.rs | 2 +- lib/vector-buffers/src/topology/acks.rs | 16 +++- lib/vector-buffers/src/topology/builder.rs | 6 +- .../src/topology/channel/limited_queue.rs | 13 ++- .../src/topology/channel/sender.rs | 2 +- .../src/variants/disk_v2/common.rs | 2 +- .../src/variants/disk_v2/ledger.rs | 2 +- .../src/variants/disk_v2/reader.rs | 2 +- .../variants/disk_v2/tests/model/record.rs | 2 +- lib/vector-common/src/event_test_util.rs | 4 +- lib/vector-common/src/finalization.rs | 2 +- .../src/schema/json_schema.rs | 2 +- lib/vector-config-macros/src/ast/mod.rs | 2 +- .../src/schema/visitors/merge.rs | 6 +- lib/vector-config/src/schema/visitors/mod.rs | 2 +- lib/vector-core/src/config/log_schema.rs | 4 + lib/vector-core/src/event/discriminant.rs | 4 +- lib/vector-core/src/event/log_event.rs | 2 +- lib/vector-core/src/event/lua/event.rs | 2 +- lib/vector-core/src/event/lua/util.rs | 4 + lib/vector-core/src/event/metric/arbitrary.rs | 4 + lib/vector-core/src/event/metric/data.rs | 6 +- lib/vector-core/src/event/metric/mod.rs | 6 +- lib/vector-core/src/event/proto.rs | 2 +- lib/vector-core/src/event/util/log/mod.rs | 4 +- lib/vector-core/src/event/util/mod.rs | 2 +- lib/vector-core/src/event/vrl_target.rs | 8 +- lib/vector-core/src/metrics/ddsketch.rs | 5 +- lib/vector-core/src/sink.rs | 4 +- lib/vector-core/src/stream/driver.rs | 4 +- lib/vector-core/src/stream/mod.rs | 2 +- lib/vector-core/src/tls/settings.rs | 19 ++++- lib/vector-core/src/transform/mod.rs | 31 ++++++- lib/vector-vrl/tests/src/main.rs | 2 +- rust-toolchain.toml | 2 +- src/codecs/encoding/config.rs | 2 +- src/conditions/mod.rs | 2 +- src/conditions/vrl.rs | 2 +- src/sinks/datadog/metrics/encoder.rs | 4 +- src/sinks/datadog/traces/apm_stats/bucket.rs | 2 +- src/sinks/elasticsearch/config.rs | 2 +- src/sinks/kafka/config.rs | 13 ++- src/sinks/loki/sink.rs | 10 +-- src/sinks/prometheus/collector.rs | 2 +- src/sinks/prometheus/exporter.rs | 4 +- src/sinks/s3_common/config.rs | 13 ++- src/sinks/util/adaptive_concurrency/mod.rs | 2 +- src/sinks/util/buffer/metrics/mod.rs | 2 +- src/sinks/util/http.rs | 11 +-- src/sinks/util/mod.rs | 2 + src/sinks/util/sink.rs | 2 +- src/sources/aws_ecs_metrics/mod.rs | 4 +- src/sources/aws_ecs_metrics/parser.rs | 20 ++--- src/sources/exec/mod.rs | 13 ++- src/sources/fluent/mod.rs | 2 +- src/sources/host_metrics/mod.rs | 8 +- 
src/sources/http_client/client.rs | 52 ++++++------ src/sources/http_server.rs | 2 +- src/sources/journald.rs | 17 ++-- src/sources/kafka.rs | 13 ++- src/sources/prometheus/parser.rs | 80 +++++++++---------- src/sources/prometheus/scrape.rs | 4 +- src/sources/redis/mod.rs | 2 +- src/sources/util/framestream.rs | 2 +- src/topology/test/backpressure.rs | 2 +- src/topology/test/end_to_end.rs | 6 +- src/transforms/lua/v1/mod.rs | 2 +- vdev/src/app.rs | 4 +- vdev/src/commands/crate_versions.rs | 2 +- vdev/src/commands/test_vrl.rs | 2 +- vdev/src/testing/config.rs | 2 +- 76 files changed, 289 insertions(+), 231 deletions(-) diff --git a/Tiltfile b/Tiltfile index 9050565afe259..ead2372a026e7 100644 --- a/Tiltfile +++ b/Tiltfile @@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo') docker_build( ref='timberio/vector', context='.', - build_args={'RUST_VERSION': '1.71.1'}, + build_args={'RUST_VERSION': '1.72.0'}, dockerfile='tilt/Dockerfile' ) diff --git a/lib/prometheus-parser/src/lib.rs b/lib/prometheus-parser/src/lib.rs index 82e4ed0033401..0f42d85ed8b7d 100644 --- a/lib/prometheus-parser/src/lib.rs +++ b/lib/prometheus-parser/src/lib.rs @@ -468,7 +468,7 @@ mod test { #[test] fn test_parse_text() { - let input = r##" + let input = r#" # HELP http_requests_total The total number of HTTP requests. # TYPE http_requests_total counter http_requests_total{method="post",code="200"} 1027 1395066363000 @@ -512,7 +512,7 @@ mod test { rpc_duration_seconds{quantile="0.99"} 76656 rpc_duration_seconds_sum 1.7560473e+07 rpc_duration_seconds_count 4.588206224e+09 - "##; + "#; let output = parse_text(input).unwrap(); assert_eq!(output.len(), 7); match_group!(output[0], "http_requests_total", Counter => |metrics: &MetricMap| { @@ -651,7 +651,7 @@ mod test { #[test] fn test_errors() { - let input = r##"name{registry="default" content_type="html"} 1890"##; + let input = r#"name{registry="default" content_type="html"} 1890"#; let error = parse_text(input).unwrap_err(); assert!(matches!( error, @@ -681,7 +681,7 @@ mod test { } )); - let input = r##"name{registry="} 1890"##; + let input = r#"name{registry="} 1890"#; let error = parse_text(input).unwrap_err(); assert!(matches!( error, diff --git a/lib/prometheus-parser/src/line.rs b/lib/prometheus-parser/src/line.rs index f1b5dbe6dacf1..28bd432b451be 100644 --- a/lib/prometheus-parser/src/line.rs +++ b/lib/prometheus-parser/src/line.rs @@ -412,7 +412,7 @@ mod test { assert_eq!(left, tail); assert_eq!(r, ""); - let input = wrap(r#"a\\ asdf"#); + let input = wrap(r"a\\ asdf"); let (left, r) = Metric::parse_escaped_string(&input).unwrap(); assert_eq!(left, tail); assert_eq!(r, "a\\ asdf"); @@ -427,7 +427,7 @@ mod test { assert_eq!(left, tail); assert_eq!(r, "\"\\\n"); - let input = wrap(r#"\\n"#); + let input = wrap(r"\\n"); let (left, r) = Metric::parse_escaped_string(&input).unwrap(); assert_eq!(left, tail); assert_eq!(r, "\\n"); @@ -671,7 +671,7 @@ mod test { #[test] fn test_parse_line() { - let input = r##" + let input = r#" # HELP http_requests_total The total number of HTTP requests. 
# TYPE http_requests_total counter http_requests_total{method="post",code="200"} 1027 1395066363000 @@ -708,7 +708,7 @@ mod test { rpc_duration_seconds{quantile="0.99"} 76656 rpc_duration_seconds_sum 1.7560473e+07 rpc_duration_seconds_count 2693 - "##; + "#; assert!(input.lines().map(Line::parse).all(|r| r.is_ok())); } } diff --git a/lib/vector-buffers/src/test/helpers.rs b/lib/vector-buffers/src/test/helpers.rs index f942254621277..4dd376be60511 100644 --- a/lib/vector-buffers/src/test/helpers.rs +++ b/lib/vector-buffers/src/test/helpers.rs @@ -40,6 +40,11 @@ macro_rules! await_timeout { }}; } +/// Run a future with a temporary directory. +/// +/// # Panics +/// +/// Will panic if function cannot create a temp directory. pub async fn with_temp_dir<F, Fut, V>(f: F) -> V where F: FnOnce(&Path) -> Fut, diff --git a/lib/vector-buffers/src/test/messages.rs b/lib/vector-buffers/src/test/messages.rs index a3fcfd8e1bd63..6d73d020937eb 100644 --- a/lib/vector-buffers/src/test/messages.rs +++ b/lib/vector-buffers/src/test/messages.rs @@ -33,6 +33,7 @@ macro_rules! message_wrapper { } impl EventCount for $id { + #[allow(clippy::redundant_closure_call)] fn event_count(&self) -> usize { usize::try_from($event_count(self)).unwrap_or(usize::MAX) } diff --git a/lib/vector-buffers/src/test/variant.rs b/lib/vector-buffers/src/test/variant.rs index 0a2a968102e43..e843f12374910 100644 --- a/lib/vector-buffers/src/test/variant.rs +++ b/lib/vector-buffers/src/test/variant.rs @@ -72,7 +72,7 @@ impl Variant { let (sender, receiver) = builder .build(String::from("benches"), Span::none()) .await - .expect("topology build should not fail"); + .unwrap_or_else(|_| unreachable!("topology build should not fail")); (sender, receiver) } diff --git a/lib/vector-buffers/src/topology/acks.rs b/lib/vector-buffers/src/topology/acks.rs index 0311970dfce43..fc6e97d6f52c1 100644 --- a/lib/vector-buffers/src/topology/acks.rs +++ b/lib/vector-buffers/src/topology/acks.rs @@ -208,6 +208,10 @@ where /// /// Acknowledgements should be given by the caller to update the acknowledgement state before /// trying to get any eligible markers. + /// + /// # Panics + /// + /// Will panic if adding ack amount overflows. pub fn add_acknowledgements(&mut self, amount: N) { self.unclaimed_acks = self .unclaimed_acks @@ -315,6 +319,10 @@ where /// /// When other pending markers are present, and the given ID is logically behind the next /// expected marker ID, `Err(MarkerError::MonotonicityViolation)` is returned. + /// + /// # Panics + /// + /// Panics if pending markers is empty when last pending marker is an unknown size. pub fn add_marker( &mut self, id: N, @@ -341,7 +349,7 @@ where let last_marker = self .pending_markers .back_mut() - .expect("pending markers should not be empty"); + .unwrap_or_else(|| unreachable!("pending markers should not be empty")); last_marker.len = PendingMarkerLength::Assumed(len); } @@ -425,13 +433,15 @@ where let PendingMarker { id, data, ..
} = self .pending_markers .pop_front() - .expect("pending markers cannot be empty"); + .unwrap_or_else(|| unreachable!("pending markers cannot be empty")); if acks_to_claim > N::min_value() { self.unclaimed_acks = self .unclaimed_acks .checked_sub(&acks_to_claim) - .expect("should not be able to claim more acks than are unclaimed"); + .unwrap_or_else(|| { + unreachable!("should not be able to claim more acks than are unclaimed") + }); } self.acked_marker_id = id.wrapping_add(&len.len()); diff --git a/lib/vector-buffers/src/topology/builder.rs b/lib/vector-buffers/src/topology/builder.rs index 9fc104f31aaed..58565891b536d 100644 --- a/lib/vector-buffers/src/topology/builder.rs +++ b/lib/vector-buffers/src/topology/builder.rs @@ -183,6 +183,8 @@ impl<T: Bufferable> TopologyBuilder<T> { /// This is a convenience method for `vector` as it is used for inter-transform channels, and we /// can simplifying needing to require callers to do all the boilerplate to create the builder, /// create the stage, installing buffer usage metrics that aren't required, and so on. + /// + #[allow(clippy::print_stderr)] pub async fn standalone_memory( max_events: NonZeroUsize, when_full: WhenFull, @@ -193,7 +195,7 @@ impl TopologyBuilder { let (sender, receiver) = memory_buffer .into_buffer_parts(usage_handle.clone()) .await - .expect("should not fail to directly create a memory buffer"); + .unwrap_or_else(|_| unreachable!("should not fail to directly create a memory buffer")); let mode = match when_full { WhenFull::Overflow => WhenFull::Block, @@ -228,7 +230,7 @@ impl TopologyBuilder { let (sender, receiver) = memory_buffer .into_buffer_parts(usage_handle.clone()) .await - .expect("should not fail to directly create a memory buffer"); + .unwrap_or_else(|_| unreachable!("should not fail to directly create a memory buffer")); let mode = match when_full { WhenFull::Overflow => WhenFull::Block, diff --git a/lib/vector-buffers/src/topology/channel/limited_queue.rs b/lib/vector-buffers/src/topology/channel/limited_queue.rs index 609d627e9362c..06434bad84dd6 100644 --- a/lib/vector-buffers/src/topology/channel/limited_queue.rs +++ b/lib/vector-buffers/src/topology/channel/limited_queue.rs @@ -107,14 +107,15 @@ impl<T: Bufferable> LimitedSender<T> { .limiter .clone() .acquire_many_owned(permits_required) - .await else { - return Err(SendError(item)) + .await + else { + return Err(SendError(item)); }; self.inner .data .push((permits, item)) - .expect("acquired permits but channel reported being full"); + .unwrap_or_else(|_| unreachable!("acquired permits but channel reported being full")); self.inner.read_waker.notify_one(); trace!("Sent item."); @@ -130,6 +131,10 @@ impl<T: Bufferable> LimitedSender<T> { /// `Err(TrySendError::Disconnected)` be returned with the given `item`. If the channel has /// insufficient capacity for the item, then `Err(TrySendError::InsufficientCapacity)` will be /// returned with the given `item`. + /// + /// # Panics + /// + /// Will panic if the channel reports being full even though the required permits were acquired. pub fn try_send(&mut self, item: T) -> Result<(), TrySendError<T>> { // Calculate how many permits we need, and try to acquire them all without waiting.
let permits_required = self.get_required_permits_for_item(&item); @@ -151,7 +156,7 @@ impl LimitedSender { self.inner .data .push((permits, item)) - .expect("acquired permits but channel reported being full"); + .unwrap_or_else(|_| unreachable!("acquired permits but channel reported being full")); self.inner.read_waker.notify_one(); trace!("Attempt to send item succeeded."); diff --git a/lib/vector-buffers/src/topology/channel/sender.rs b/lib/vector-buffers/src/topology/channel/sender.rs index 9ec0ebadacc53..a0a30a45fe908 100644 --- a/lib/vector-buffers/src/topology/channel/sender.rs +++ b/lib/vector-buffers/src/topology/channel/sender.rs @@ -201,7 +201,7 @@ impl BufferSender { sent_to_base = false; self.overflow .as_mut() - .expect("overflow must exist") + .unwrap_or_else(|| unreachable!("overflow must exist")) .send(item) .await?; } diff --git a/lib/vector-buffers/src/variants/disk_v2/common.rs b/lib/vector-buffers/src/variants/disk_v2/common.rs index 4ff477d1ed07d..e7fa397acda5f 100644 --- a/lib/vector-buffers/src/variants/disk_v2/common.rs +++ b/lib/vector-buffers/src/variants/disk_v2/common.rs @@ -348,7 +348,7 @@ where return Err(BuildError::InvalidParameter { param_name: "max_record_size", reason: "must be less than 2^64 bytes".to_string(), - }) + }); }; if max_record_size_converted > max_data_file_size { diff --git a/lib/vector-buffers/src/variants/disk_v2/ledger.rs b/lib/vector-buffers/src/variants/disk_v2/ledger.rs index f3209f3ca9c45..b682fb6fd7b02 100644 --- a/lib/vector-buffers/src/variants/disk_v2/ledger.rs +++ b/lib/vector-buffers/src/variants/disk_v2/ledger.rs @@ -735,6 +735,6 @@ where ) .field("writer_done", &self.writer_done.load(Ordering::Acquire)) .field("last_flush", &self.last_flush.load()) - .finish() + .finish_non_exhaustive() } } diff --git a/lib/vector-buffers/src/variants/disk_v2/reader.rs b/lib/vector-buffers/src/variants/disk_v2/reader.rs index 7d4da1faafa1e..e2ad6a81a941c 100644 --- a/lib/vector-buffers/src/variants/disk_v2/reader.rs +++ b/lib/vector-buffers/src/variants/disk_v2/reader.rs @@ -855,7 +855,7 @@ where // If there's an error decoding the item, just fall back to the slow path, // because this file might actually be where we left off, so we don't want // to incorrectly skip ahead or anything. 
- break + break; }; // We have to remove 1 from the event count here because otherwise the ID would diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/model/record.rs b/lib/vector-buffers/src/variants/disk_v2/tests/model/record.rs index 3fa1669790468..3b13e35ca47e0 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/model/record.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/model/record.rs @@ -80,7 +80,7 @@ impl fmt::Debug for Record { .field("event_count", &self.event_count) .field("encoded_len", &self.encoded_len()) .field("archived_len", &self.archived_len()) - .finish() + .finish_non_exhaustive() } } diff --git a/lib/vector-common/src/event_test_util.rs b/lib/vector-common/src/event_test_util.rs index cbcfadf663ee9..39b0f8ebd4db8 100644 --- a/lib/vector-common/src/event_test_util.rs +++ b/lib/vector-common/src/event_test_util.rs @@ -15,7 +15,7 @@ pub fn contains_name_once(pattern: &str) -> Result<(), String> { EVENTS_RECORDED.with(|events| { let mut n_events = 0; let mut names = String::new(); - for event in events.borrow().iter() { + for event in &*events.borrow() { if event.ends_with(pattern) { if n_events > 0 { names.push_str(", "); @@ -44,7 +44,7 @@ pub fn clear_recorded_events() { #[allow(clippy::print_stdout)] pub fn debug_print_events() { EVENTS_RECORDED.with(|events| { - for event in events.borrow().iter() { + for event in &*events.borrow() { println!("{event}"); } }); diff --git a/lib/vector-common/src/finalization.rs b/lib/vector-common/src/finalization.rs index 396760b5200cb..48001a2bf8727 100644 --- a/lib/vector-common/src/finalization.rs +++ b/lib/vector-common/src/finalization.rs @@ -76,7 +76,7 @@ impl EventFinalizers { /// Merges the event finalizers from `other` into the collection. pub fn merge(&mut self, other: Self) { - self.0.extend(other.0.into_iter()); + self.0.extend(other.0); } /// Updates the status of all event finalizers in the collection. 
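A note on the pattern running through the buffer hunks above: `.expect(...)` calls whose failure would mean a broken internal invariant are rewritten as `.unwrap_or_else(|| unreachable!(...))` (or `|_| unreachable!(...)` on a `Result`), while panics that callers can actually trigger instead gain a `# Panics` doc section. This presumably satisfies clippy's `missing_panics_doc` on Rust 1.72, which wants reachable panics documented but does not treat `unreachable!` as a documentable panic. A minimal sketch of both remediations (the `Queue` type and its methods are hypothetical, not part of this patch):

    use std::collections::VecDeque;

    pub struct Queue {
        items: VecDeque<u64>,
    }

    impl Queue {
        /// Removes and returns the oldest item.
        ///
        /// # Panics
        ///
        /// Panics if the queue is empty.
        pub fn pop_oldest(&mut self) -> u64 {
            // Reachable panic: documented above rather than rewritten,
            // because callers can actually hit it.
            self.items.pop_front().expect("queue must not be empty")
        }

        pub fn pop_checked(&mut self) -> Option<u64> {
            if self.items.is_empty() {
                return None;
            }
            // Invariant violation: the emptiness check above guarantees an
            // item, so the panic is marked `unreachable!` and needs no doc.
            Some(
                self.items
                    .pop_front()
                    .unwrap_or_else(|| unreachable!("non-empty checked above")),
            )
        }
    }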
diff --git a/lib/vector-config-common/src/schema/json_schema.rs b/lib/vector-config-common/src/schema/json_schema.rs index e2b32c31efd18..e737f34c76714 100644 --- a/lib/vector-config-common/src/schema/json_schema.rs +++ b/lib/vector-config-common/src/schema/json_schema.rs @@ -563,7 +563,7 @@ impl<T> Extend<T> for SingleOrVec<T> { fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) { match self { Self::Single(item) => { - *self = Self::Vec(iter::once(*item.clone()).chain(iter.into_iter()).collect()); + *self = Self::Vec(iter::once(*item.clone()).chain(iter).collect()); } Self::Vec(items) => items.extend(iter), } diff --git a/lib/vector-config-macros/src/ast/mod.rs b/lib/vector-config-macros/src/ast/mod.rs index 1825f9e6fd4e9..b307827d2772f 100644 --- a/lib/vector-config-macros/src/ast/mod.rs +++ b/lib/vector-config-macros/src/ast/mod.rs @@ -4,7 +4,7 @@ use serde_derive_internals::{ast as serde_ast, attr as serde_attr}; mod container; mod field; -pub(self) mod util; +mod util; mod variant; pub use container::Container; diff --git a/lib/vector-config/src/schema/visitors/merge.rs b/lib/vector-config/src/schema/visitors/merge.rs index a05478caef168..28b724d602c5c 100644 --- a/lib/vector-config/src/schema/visitors/merge.rs +++ b/lib/vector-config/src/schema/visitors/merge.rs @@ -128,11 +128,7 @@ fn merge_schema_instance_type( source: Option<&SingleOrVec<InstanceType>>, ) { merge_optional_with(destination, source, |existing, new| { - let mut deduped = existing - .into_iter() - .chain(new.into_iter()) - .cloned() - .collect::<Vec<_>>(); + let mut deduped = existing.into_iter().chain(new).cloned().collect::<Vec<_>>(); deduped.dedup(); *existing = deduped.into(); diff --git a/lib/vector-config/src/schema/visitors/mod.rs b/lib/vector-config/src/schema/visitors/mod.rs index ae5551c0c01d2..7b459b69fc5ba 100644 --- a/lib/vector-config/src/schema/visitors/mod.rs +++ b/lib/vector-config/src/schema/visitors/mod.rs @@ -5,7 +5,7 @@ pub mod scoped_visit; mod unevaluated; #[cfg(test)] -pub(self) mod test; +mod test; pub use self::human_name::GenerateHumanFriendlyNameVisitor; pub use self::inline_single::InlineSingleUseReferencesVisitor; diff --git a/lib/vector-core/src/config/log_schema.rs b/lib/vector-core/src/config/log_schema.rs index a8b0388cda6c0..f5dc90c4188c7 100644 --- a/lib/vector-core/src/config/log_schema.rs +++ b/lib/vector-core/src/config/log_schema.rs @@ -120,6 +120,10 @@ impl LogSchema { /// /// This should only be used where the result will either be cached, /// or performance isn't critical, since this requires memory allocation. + /// + /// # Panics + /// + /// Panics if the path in `self.message_key` is invalid.
pub fn owned_message_path(&self) -> OwnedTargetPath { self.message_key .path diff --git a/lib/vector-core/src/event/discriminant.rs b/lib/vector-core/src/event/discriminant.rs index fcbd5d0fa818f..98ff1d996ba12 100644 --- a/lib/vector-core/src/event/discriminant.rs +++ b/lib/vector-core/src/event/discriminant.rs @@ -145,13 +145,13 @@ fn hash_f64<H: Hasher>(hasher: &mut H, value: f64) { } fn hash_array<H: Hasher>(hasher: &mut H, array: &[Value]) { - for val in array.iter() { + for val in array { hash_value(hasher, val); } } fn hash_map<H: Hasher>(hasher: &mut H, map: &BTreeMap<String, Value>) { - for (key, val) in map.iter() { + for (key, val) in map { hasher.write(key.as_bytes()); hash_value(hasher, val); } diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index d249e5133c0d8..f27f2b7af64fb 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -476,7 +476,7 @@ impl LogEvent { for field in fields { let field_path = event_path!(field.as_ref()); let Some(incoming_val) = incoming.remove(field_path) else { - continue + continue; }; match self.get_mut(field_path) { None => { diff --git a/lib/vector-core/src/event/lua/event.rs b/lib/vector-core/src/event/lua/event.rs index 559361c2daa77..754bfb4d0e18e 100644 --- a/lib/vector-core/src/event/lua/event.rs +++ b/lib/vector-core/src/event/lua/event.rs @@ -41,7 +41,7 @@ impl<'a> FromLua<'a> for Event { from: value.type_name(), to: "Event", message: Some("Event should be a Lua table".to_string()), - }) + }); }; match (table.raw_get("log")?, table.raw_get("metric")?) { (LuaValue::Table(log), LuaValue::Nil) => { diff --git a/lib/vector-core/src/event/lua/util.rs b/lib/vector-core/src/event/lua/util.rs index 17a058f163f11..cd729fbf67515 100644 --- a/lib/vector-core/src/event/lua/util.rs +++ b/lib/vector-core/src/event/lua/util.rs @@ -41,6 +41,10 @@ pub fn table_is_timestamp(t: &LuaTable<'_>) -> LuaResult<bool> { /// # Errors /// /// This function will fail if the table is malformed. +/// +/// # Panics +/// +/// Panics if the resulting timestamp is invalid.
#[allow(clippy::needless_pass_by_value)] // constrained by mlua types pub fn table_to_timestamp(t: LuaTable<'_>) -> LuaResult<DateTime<Utc>> { let year = t.raw_get("year")?; diff --git a/lib/vector-core/src/event/metric/arbitrary.rs b/lib/vector-core/src/event/metric/arbitrary.rs index 500802d2e572e..b526a98ff8a31 100644 --- a/lib/vector-core/src/event/metric/arbitrary.rs +++ b/lib/vector-core/src/event/metric/arbitrary.rs @@ -17,6 +17,10 @@ impl Arbitrary for MetricValue { type Parameters = (); type Strategy = BoxedStrategy<Self>; + // TODO(jszwedko): clippy allow can be removed once + // https://github.com/proptest-rs/proptest/commit/466d59daeca317f815bb8358e8d981bb9bd9431a is + // released + #[allow(clippy::arc_with_non_send_sync)] fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { let strategy = prop_oneof![ realistic_float().prop_map(|value| MetricValue::Counter { value }), diff --git a/lib/vector-core/src/event/metric/data.rs b/lib/vector-core/src/event/metric/data.rs index 0cd68489a1cc7..44823d0b81199 100644 --- a/lib/vector-core/src/event/metric/data.rs +++ b/lib/vector-core/src/event/metric/data.rs @@ -100,9 +100,9 @@ impl MetricData { other.time.interval_ms, ) { (Some(t1), Some(i1), Some(t2), Some(i2)) => { - let Ok(delta_t) = TryInto::<u32>::try_into( - t1.timestamp_millis().abs_diff(t2.timestamp_millis()), - ) else { + let Ok(delta_t) = + TryInto::<u32>::try_into(t1.timestamp_millis().abs_diff(t2.timestamp_millis())) + else { return false; }; diff --git a/lib/vector-core/src/event/metric/mod.rs b/lib/vector-core/src/event/metric/mod.rs index 9d29e4fd054ae..f27c24ae119fb 100644 --- a/lib/vector-core/src/event/metric/mod.rs +++ b/lib/vector-core/src/event/metric/mod.rs @@ -578,7 +578,7 @@ pub(crate) fn zip_samples( ) -> Vec<Sample> { values .into_iter() - .zip(rates.into_iter()) + .zip(rates) .map(|(value, rate)| Sample { value, rate }) .collect() } @@ -590,7 +590,7 @@ pub(crate) fn zip_buckets( ) -> Vec<Bucket> { limits .into_iter() - .zip(counts.into_iter()) + .zip(counts) .map(|(upper_limit, count)| Bucket { upper_limit, count }) .collect() } @@ -602,7 +602,7 @@ pub(crate) fn zip_quantiles( ) -> Vec<Quantile> { quantiles .into_iter() - .zip(values.into_iter()) + .zip(values) .map(|(quantile, value)| Quantile { quantile, value }) .collect() } diff --git a/lib/vector-core/src/event/proto.rs b/lib/vector-core/src/event/proto.rs index a00a2c7a317fd..6e488c13503bf 100644 --- a/lib/vector-core/src/event/proto.rs +++ b/lib/vector-core/src/event/proto.rs @@ -229,7 +229,7 @@ impl From<Metric> for event::Metric { // The current Vector encoding includes copies of the "single" values of tags in `tags_v2` // above. This `extend` will re-add those values, forcing them to become the last added in // the value set.
- tags.extend(metric.tags_v1.into_iter()); + tags.extend(metric.tags_v1); let tags = (!tags.is_empty()).then_some(tags); let value = event::MetricValue::from(metric.value.unwrap()); diff --git a/lib/vector-core/src/event/util/log/mod.rs b/lib/vector-core/src/event/util/log/mod.rs index 87bfbc7231fe8..dc619db32c09d 100644 --- a/lib/vector-core/src/event/util/log/mod.rs +++ b/lib/vector-core/src/event/util/log/mod.rs @@ -4,10 +4,10 @@ mod keys; pub use all_fields::{all_fields, all_fields_non_object_root, all_metadata_fields}; pub use keys::keys; -pub(self) use super::Value; +use super::Value; #[cfg(test)] -pub(self) mod test { +mod test { use std::collections::BTreeMap; use serde_json::Value as JsonValue; diff --git a/lib/vector-core/src/event/util/mod.rs b/lib/vector-core/src/event/util/mod.rs index 7f8d1a48124b6..952cbfe714256 100644 --- a/lib/vector-core/src/event/util/mod.rs +++ b/lib/vector-core/src/event/util/mod.rs @@ -1,3 +1,3 @@ pub mod log; -pub(self) use super::Value; +use super::Value; diff --git a/lib/vector-core/src/event/vrl_target.rs b/lib/vector-core/src/event/vrl_target.rs index 8dd647e741c6a..b29439a9a1e43 100644 --- a/lib/vector-core/src/event/vrl_target.rs +++ b/lib/vector-core/src/event/vrl_target.rs @@ -1246,7 +1246,7 @@ mod test { MetricValue::Counter { value: 1.23 }, ); - let validpaths_get = vec![ + let validpaths_get = [ ".name", ".namespace", ".timestamp", @@ -1255,7 +1255,7 @@ mod test { ".type", ]; - let validpaths_set = vec![".name", ".namespace", ".timestamp", ".kind", ".tags"]; + let validpaths_set = [".name", ".namespace", ".timestamp", ".kind", ".tags"]; let info = ProgramInfo { fallible: false, @@ -1340,7 +1340,9 @@ mod test { )])) ); - let VrlTarget::Metric { metric, .. } = target else {unreachable!()}; + let VrlTarget::Metric { metric, .. } = target else { + unreachable!() + }; // get single value (should be the last one) assert_eq!(metric.tag_value("foo"), Some("b".into())); diff --git a/lib/vector-core/src/metrics/ddsketch.rs b/lib/vector-core/src/metrics/ddsketch.rs index 39da48d591197..3d0f80bb50402 100644 --- a/lib/vector-core/src/metrics/ddsketch.rs +++ b/lib/vector-core/src/metrics/ddsketch.rs @@ -643,7 +643,8 @@ impl AgentDDSketch { // generally enforced at the source level by converting from cumulative buckets, or // enforced by the internal structures that hold bucketed data i.e. Vector's internal // `Histogram` data structure used for collecting histograms from `metrics`. 
- let count = u32::try_from(bucket.count).expect("count range has already been checked."); + let count = u32::try_from(bucket.count) + .unwrap_or_else(|_| unreachable!("count range has already been checked.")); self.insert_interpolate_bucket(lower, upper, count); lower = bucket.upper_limit; @@ -882,7 +883,7 @@ impl BinMap { Some( self.keys .into_iter() - .zip(self.counts.into_iter()) + .zip(self.counts) .map(|(k, n)| Bin { k, n }) .collect(), ) diff --git a/lib/vector-core/src/sink.rs b/lib/vector-core/src/sink.rs index fb47b6f4d50dd..a3e2e66e08c17 100644 --- a/lib/vector-core/src/sink.rs +++ b/lib/vector-core/src/sink.rs @@ -131,7 +131,9 @@ impl<S: Sink<Event> + Send + Unpin> EventSink<S> { fn flush_queue(self: &mut Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> { while self.queue.is_some() { poll_ready_ok!(self.sink.poll_ready_unpin(cx)); - let Some(event) = self.next_event() else {break}; + let Some(event) = self.next_event() else { + break; + }; if let Err(err) = self.sink.start_send_unpin(event) { return Poll::Ready(Err(err)); } diff --git a/lib/vector-core/src/stream/driver.rs b/lib/vector-core/src/stream/driver.rs index 1a1ce79e1b27a..73db71d33f31b 100644 --- a/lib/vector-core/src/stream/driver.rs +++ b/lib/vector-core/src/stream/driver.rs @@ -133,7 +133,7 @@ where // We've got an input batch to process and the service is ready to accept a request. maybe_ready = poll_fn(|cx| service.poll_ready(cx)), if next_batch.is_some() => { let mut batch = next_batch.take() - .expect("batch should be populated"); + .unwrap_or_else(|| unreachable!("batch should be populated")); let mut maybe_ready = Some(maybe_ready); while !batch.is_empty() { @@ -155,7 +155,7 @@ where }, }; - let mut req = batch.pop_front().expect("batch should not be empty"); + let mut req = batch.pop_front().unwrap_or_else(|| unreachable!("batch should not be empty")); seq_num += 1; let request_id = seq_num; diff --git a/lib/vector-core/src/stream/mod.rs b/lib/vector-core/src/stream/mod.rs index 5101109694032..64e4edc3247f9 100644 --- a/lib/vector-core/src/stream/mod.rs +++ b/lib/vector-core/src/stream/mod.rs @@ -7,5 +7,5 @@ mod partitioned_batcher; pub use concurrent_map::ConcurrentMap; pub use driver::{Driver, DriverResponse}; -pub(self) use futures_unordered_count::FuturesUnorderedCount; +use futures_unordered_count::FuturesUnorderedCount; pub use partitioned_batcher::{BatcherSettings, ExpirationQueue, PartitionedBatcher}; diff --git a/lib/vector-core/src/tls/settings.rs b/lib/vector-core/src/tls/settings.rs index 175660be97649..d7401049f77da 100644 --- a/lib/vector-core/src/tls/settings.rs +++ b/lib/vector-core/src/tls/settings.rs @@ -206,6 +206,11 @@ impl TlsSettings { }) } + /// Returns the identity as PKCS12 + /// + /// # Panics + /// + /// Panics if the identity is invalid. fn identity(&self) -> Option { // This data was test-built previously, so we can just use it // here and expect the results will not fail. This can all be @@ -219,6 +224,11 @@ impl TlsSettings { }) } + /// Returns the identity as PEM data + /// + /// # Panics + /// + /// Panics if the identity is missing, invalid, or the authorities to chain are invalid. pub fn identity_pem(&self) -> Option<(Vec<u8>, Vec<u8>)> { self.identity().map(|identity| { let mut cert = identity @@ -244,6 +254,11 @@ impl TlsSettings { }) } + /// Returns the authorities as PEM data + /// + /// # Panics + /// + /// Panics if the authority is invalid.
pub fn authorities_pem(&self) -> impl Iterator<Item = Vec<u8>> + '_ { self.authorities.iter().map(|authority| { authority @@ -351,7 +366,7 @@ impl TlsConfig { None => Ok(None), Some(protocols) => { let mut data: Vec<u8> = Vec::new(); - for str in protocols.iter() { + for str in protocols { data.push(str.len().try_into().context(EncodeAlpnProtocolsSnafu)?); data.append(&mut str.clone().into_bytes()); } @@ -498,7 +513,7 @@ impl fmt::Debug for TlsSettings { f.debug_struct("TlsSettings") .field("verify_certificate", &self.verify_certificate) .field("verify_hostname", &self.verify_hostname) - .finish() + .finish_non_exhaustive() } } diff --git a/lib/vector-core/src/transform/mod.rs b/lib/vector-core/src/transform/mod.rs index b098837f49677..4083f34cc8e99 100644 --- a/lib/vector-core/src/transform/mod.rs +++ b/lib/vector-core/src/transform/mod.rs @@ -270,11 +270,17 @@ impl TransformOutputs { buf: &mut TransformOutputsBuf, ) -> Result<(), Box> { if let Some(primary) = self.primary_output.as_mut() { - let buf = buf.primary_buffer.as_mut().expect("mismatched outputs"); + let buf = buf + .primary_buffer + .as_mut() + .unwrap_or_else(|| unreachable!("mismatched outputs")); Self::send_single_buffer(buf, primary).await?; } for (key, buf) in &mut buf.named_buffers { - let output = self.named_outputs.get_mut(key).expect("unknown output"); + let output = self + .named_outputs + .get_mut(key) + .unwrap_or_else(|| unreachable!("unknown output")); Self::send_single_buffer(buf, output).await?; } Ok(()) } @@ -353,7 +359,11 @@ impl TransformOutputsBuf { } } - /// Adds a new event to the transform output buffer + /// Adds a new event to the named output buffer. + /// + /// # Panics + /// + /// Panics if there is no output with the given name. pub fn push(&mut self, name: Option<&str>, event: Event) { match name { Some(name) => self.named_buffers.get_mut(name), @@ -363,6 +373,11 @@ impl TransformOutputsBuf { .push(event); } + /// Drains the default output buffer. + /// + /// # Panics + /// + /// Panics if there is no default output. #[cfg(any(feature = "test", test))] pub fn drain(&mut self) -> impl Iterator<Item = Event> + '_ { self.primary_buffer .as_mut() .expect("no default output") .drain() } + /// Drains the named output buffer. + /// + /// # Panics + /// + /// Panics if there is no output with the given name. #[cfg(any(feature = "test", test))] pub fn drain_named(&mut self, name: &str) -> impl Iterator<Item = Event> + '_ { self.named_buffers .get_mut(name) .expect("no output named") .drain() } + /// Takes the default output buffer. + /// + /// # Panics + /// + /// Panics if there is no default output.
#[cfg(any(feature = "test", test))] pub fn take_primary(&mut self) -> OutputBuffer { std::mem::take(self.primary_buffer.as_mut().expect("no default output")) diff --git a/lib/vector-vrl/tests/src/main.rs b/lib/vector-vrl/tests/src/main.rs index 037b8ebedd614..9452c73b97ece 100644 --- a/lib/vector-vrl/tests/src/main.rs +++ b/lib/vector-vrl/tests/src/main.rs @@ -118,7 +118,7 @@ fn get_tests(cmd: &Cmd) -> Vec { let path = entry.ok()?; Some(Test::from_path(&path)) }) - .chain(docs::tests(cmd.ignore_cue).into_iter()) + .chain(docs::tests(cmd.ignore_cue)) .chain(get_tests_from_functions( vector_vrl_functions::all() .into_iter() diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 08e602710eeb9..748be2713f5b3 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.71.1" +channel = "1.72.0" profile = "default" diff --git a/src/codecs/encoding/config.rs b/src/codecs/encoding/config.rs index 5e24db0a9b162..0ae7b1648a026 100644 --- a/src/codecs/encoding/config.rs +++ b/src/codecs/encoding/config.rs @@ -228,7 +228,7 @@ mod test { let encoding = serde_json::from_str::(string).unwrap(); let (framing, serializer) = encoding.config(); - assert!(matches!(framing, None)); + assert!(framing.is_none()); assert!(matches!(serializer, SerializerConfig::Json(_))); let transformer = encoding.transformer(); diff --git a/src/conditions/mod.rs b/src/conditions/mod.rs index 098900959cac5..a55c84a2f163a 100644 --- a/src/conditions/mod.rs +++ b/src/conditions/mod.rs @@ -3,7 +3,7 @@ use vector_config::configurable_component; use crate::event::Event; -pub(self) mod datadog_search; +mod datadog_search; pub(crate) mod is_log; pub(crate) mod is_metric; pub(crate) mod is_trace; diff --git a/src/conditions/vrl.rs b/src/conditions/vrl.rs index 6db16feb474eb..8011f8cba4701 100644 --- a/src/conditions/vrl.rs +++ b/src/conditions/vrl.rs @@ -43,7 +43,7 @@ impl ConditionalConfig for VrlConfig { let functions = vrl::stdlib::all() .into_iter() - .chain(enrichment::vrl_functions().into_iter()) + .chain(enrichment::vrl_functions()) .chain(vector_vrl_functions::all()) .collect::>(); diff --git a/src/sinks/datadog/metrics/encoder.rs b/src/sinks/datadog/metrics/encoder.rs index dfe5260897829..0e0ad6c685291 100644 --- a/src/sinks/datadog/metrics/encoder.rs +++ b/src/sinks/datadog/metrics/encoder.rs @@ -785,7 +785,9 @@ mod tests { { let mut sketches = Vec::new(); for metric in metrics { - let MetricValue::Sketch { sketch } = metric.value() else { panic!("must be sketch") }; + let MetricValue::Sketch { sketch } = metric.value() else { + panic!("must be sketch") + }; match sketch { MetricSketch::AgentDDSketch(ddsketch) => { // Don't encode any empty sketches. 
diff --git a/src/sinks/datadog/traces/apm_stats/bucket.rs b/src/sinks/datadog/traces/apm_stats/bucket.rs index 25beaf3d60242..a1d0a9f198508 100644 --- a/src/sinks/datadog/traces/apm_stats/bucket.rs +++ b/src/sinks/datadog/traces/apm_stats/bucket.rs @@ -94,7 +94,7 @@ fn convert_stores(agent_sketch: &AgentDDSketch) -> (BTreeMap, BTreeMap bin_map .keys .into_iter() - .zip(bin_map.counts.into_iter()) + .zip(bin_map.counts) .for_each(|(k, n)| { match k.signum() { 0 => zeroes = n as f64, diff --git a/src/sinks/elasticsearch/config.rs b/src/sinks/elasticsearch/config.rs index 7ec052a4f8fde..6df73422ee1cd 100644 --- a/src/sinks/elasticsearch/config.rs +++ b/src/sinks/elasticsearch/config.rs @@ -194,7 +194,7 @@ fn default_doc_type() -> String { } fn query_examples() -> HashMap<String, String> { - HashMap::<_, _>::from_iter([("X-Powered-By".to_owned(), "Vector".to_owned())].into_iter()) + HashMap::<_, _>::from_iter([("X-Powered-By".to_owned(), "Vector".to_owned())]) } impl Default for ElasticsearchConfig { diff --git a/src/sinks/kafka/config.rs b/src/sinks/kafka/config.rs index a40c6525da835..692718b9096de 100644 --- a/src/sinks/kafka/config.rs +++ b/src/sinks/kafka/config.rs @@ -131,14 +131,11 @@ const fn default_message_timeout_ms() -> Duration { } fn example_librdkafka_options() -> HashMap<String, String> { - HashMap::<_, _>::from_iter( - [ - ("client.id".to_string(), "${ENV_VAR}".to_string()), - ("fetch.error.backoff.ms".to_string(), "1000".to_string()), - ("socket.send.buffer.bytes".to_string(), "100".to_string()), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ("client.id".to_string(), "${ENV_VAR}".to_string()), + ("fetch.error.backoff.ms".to_string(), "1000".to_string()), + ("socket.send.buffer.bytes".to_string(), "100".to_string()), + ]) } /// Used to determine the options to set in configs, since both Kafka consumers and producers have diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs index bb0db3853cf7f..0d9cb9fbb43e1 100644 --- a/src/sinks/loki/sink.rs +++ b/src/sinks/loki/sink.rs @@ -637,7 +637,7 @@ mod tests { remove_timestamp: false, }; - let message = r###" + let message = r#" { "kubernetes": { "pod_labels": { "app.kubernetes.io/name": "sandbox0-logs", "app.kubernetes.io/instance": "sandbox0-log-test" }, "metadata": { "cluster_name": "operations", "cluster_environment": "development", "cluster_version": "1.2.3" } } - "###; + "#; let msg: BTreeMap = serde_json::from_str(message)?; let event = Event::Log(LogEvent::from(msg)); let record = encoder.encode_event(event).unwrap(); @@ -687,7 +687,7 @@ mod tests { remove_timestamp: false, }; - let message = r###" + let message = r#" { "map1": { "key1": "val1" }, "map2": { "l1_key1": "val2" } } - "###; + "#; let msg: BTreeMap = serde_json::from_str(message)?; let event = Event::Log(LogEvent::from(msg)); let record = encoder.encode_event(event).unwrap(); assert_eq!(record.labels.len(), 1); let labels: HashMap<String, String> = record.labels.into_iter().collect(); // EventEncoder.labels is type HashMap (unordered) -> both values can be valid assert!(["val1".to_string(), "val2".to_string()].contains(&labels["l1_key1"])); Ok(()) } diff --git a/src/sinks/prometheus/collector.rs b/src/sinks/prometheus/collector.rs index 0a3714485e11d..d5429259e8dee 100644 --- a/src/sinks/prometheus/collector.rs +++ b/src/sinks/prometheus/collector.rs @@ -923,7 +923,7 @@ mod tests { let tags = metric_tags!( "code" => "200", "quoted" => r#"host"1""#, - "path" => r#"c:\Windows"#, + "path" => r"c:\Windows", ); let metric = Metric::new( "something".to_owned(), diff --git a/src/sinks/prometheus/exporter.rs
b/src/sinks/prometheus/exporter.rs index 5f75b4af34958..63a882e8dfa98 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -1041,11 +1041,11 @@ mod tests { ) } - pub(self) fn create_metric(name: Option<String>, value: MetricValue) -> (String, Event) { + fn create_metric(name: Option<String>, value: MetricValue) -> (String, Event) { create_metric_with_tags(name, value, Some(metric_tags!("some_tag" => "some_value"))) } - pub(self) fn create_metric_with_tags( + fn create_metric_with_tags( name: Option<String>, value: MetricValue, tags: Option<MetricTags>, diff --git a/src/sinks/s3_common/config.rs b/src/sinks/s3_common/config.rs index 82f739f0605fa..115d4b7e9596b 100644 --- a/src/sinks/s3_common/config.rs +++ b/src/sinks/s3_common/config.rs @@ -125,14 +125,11 @@ pub struct S3Options { } fn example_tags() -> HashMap<String, String> { - HashMap::<_, _>::from_iter( - [ - ("Project".to_string(), "Blue".to_string()), - ("Classification".to_string(), "confidential".to_string()), - ("PHI".to_string(), "True".to_string()), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ("Project".to_string(), "Blue".to_string()), + ("Classification".to_string(), "confidential".to_string()), + ("PHI".to_string(), "True".to_string()), + ]) } /// S3 storage classes. diff --git a/src/sinks/util/adaptive_concurrency/mod.rs b/src/sinks/util/adaptive_concurrency/mod.rs index 228d93d4e28b8..ab5afd6c53f87 100644 --- a/src/sinks/util/adaptive_concurrency/mod.rs +++ b/src/sinks/util/adaptive_concurrency/mod.rs @@ -17,7 +17,7 @@ pub(crate) use layer::AdaptiveConcurrencyLimitLayer; pub(crate) use service::AdaptiveConcurrencyLimit; use vector_config::configurable_component; -pub(self) fn instant_now() -> std::time::Instant { +fn instant_now() -> std::time::Instant { tokio::time::Instant::now().into() } diff --git a/src/sinks/util/buffer/metrics/mod.rs b/src/sinks/util/buffer/metrics/mod.rs index 9e1e98f28d2bc..877cdc9c4bcc1 100644 --- a/src/sinks/util/buffer/metrics/mod.rs +++ b/src/sinks/util/buffer/metrics/mod.rs @@ -125,7 +125,7 @@ pub fn compress_distribution(samples: &mut Vec<Sample>) -> Vec<Sample> { } #[cfg(test)] -pub(self) mod tests { +mod tests { use similar_asserts::assert_eq; use vector_core::event::metric::{MetricKind, MetricKind::*, MetricValue, StatisticKind}; use vector_core::metric_tags; diff --git a/src/sinks/util/http.rs b/src/sinks/util/http.rs index 93a56f5bf6097..f348557eb2e0c 100644 --- a/src/sinks/util/http.rs +++ b/src/sinks/util/http.rs @@ -560,13 +560,10 @@ pub struct RequestConfig { } fn headers_examples() -> IndexMap<String, String> { - IndexMap::<_, _>::from_iter( - [ - ("Accept".to_owned(), "text/plain".to_owned()), - ("X-My-Custom-Header".to_owned(), "A-Value".to_owned()), - ] - .into_iter(), - ) + IndexMap::<_, _>::from_iter([ + ("Accept".to_owned(), "text/plain".to_owned()), + ("X-My-Custom-Header".to_owned(), "A-Value".to_owned()), + ]) } impl RequestConfig { diff --git a/src/sinks/util/mod.rs b/src/sinks/util/mod.rs index 6b26d07a6ed51..1fcae4842cd7f 100644 --- a/src/sinks/util/mod.rs +++ b/src/sinks/util/mod.rs @@ -1,5 +1,7 @@ pub mod adaptive_concurrency; pub mod auth; +// https://github.com/mcarton/rust-derivative/issues/112 +#[allow(clippy::incorrect_clone_impl_on_copy_type)] pub mod batch; pub mod buffer; pub mod builder; diff --git a/src/sinks/util/sink.rs b/src/sinks/util/sink.rs index 3305573899acb..2b318b4ef7ab1 100644 --- a/src/sinks/util/sink.rs +++ b/src/sinks/util/sink.rs @@ -666,7 +666,7 @@ mod tests { // Services future will be spawned and work between `yield_now` calls.
let svc = tower::service_fn(|req: Vec| async move { let duration = match req[0].0 { - 1 | 2 | 3 => Duration::from_secs(1), + 1..=3 => Duration::from_secs(1), // The 4th request will introduce some sort of // latency spike to ensure later events don't diff --git a/src/sources/aws_ecs_metrics/mod.rs b/src/sources/aws_ecs_metrics/mod.rs index 5829d4e004116..09dc5f92c2b89 100644 --- a/src/sources/aws_ecs_metrics/mod.rs +++ b/src/sources/aws_ecs_metrics/mod.rs @@ -272,7 +272,7 @@ mod test { let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { Ok::<_, Error>(Response::new(Body::from( - r##" + r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-3822082590": { "read": "2020-09-23T20:32:26.292561674Z", @@ -563,7 +563,7 @@ mod test { } } } - "##, + "#, ))) })) }); diff --git a/src/sources/aws_ecs_metrics/parser.rs b/src/sources/aws_ecs_metrics/parser.rs index c4e6fced6af72..2d1bbf9def0b2 100644 --- a/src/sources/aws_ecs_metrics/parser.rs +++ b/src/sources/aws_ecs_metrics/parser.rs @@ -579,7 +579,7 @@ mod test { #[test] fn parse_block_io_metrics() { - let json = r##" + let json = r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352": { "read": "2018-11-14T08:09:10.000000011Z", "preread": "2018-11-14T08:09:05.000000011Z", "pids_stats": {}, "blkio_stats": { "io_service_bytes_recursive": [ { "major": 202, "minor": 26368, "op": "Read", "value": 0 }, { "major": 202, "minor": 26368, "op": "Write", "value": 520192 } ], "io_serviced_recursive": [], "io_queue_recursive": [], "io_service_time_recursive": [], "io_wait_time_recursive": [], "io_merged_recursive": [], "io_time_recursive": [], "sectors_recursive": [] } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -609,7 +609,7 @@ mod test { "sectors_recursive": [] } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -646,7 +646,7 @@ mod test { #[test] fn parse_cpu_metrics() { - let json = r##" + let json = r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352": { "read": "2018-11-14T08:09:10.000000011Z", "cpu_stats": { "cpu_usage": { "total_usage": 2324920942, "percpu_usage": [ 1095931487, 1228989455, 0, 0 ], "usage_in_kernelmode": 190000000, "usage_in_usermode": 510000000 }, "system_cpu_usage": 2007130000000, "online_cpus": 2, "throttling_data": { "periods": 0, "throttled_periods": 0, "throttled_time": 0 } } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -673,7 +673,7 @@ mod test { } } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -804,7 +804,7 @@ mod test { #[test] fn parse_precpu_metrics() { - let json = r##" + let json = r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352": { "read": "2018-11-14T08:09:10.000000011Z", "precpu_stats": { "cpu_usage": { "total_usage": 2324920942, "percpu_usage": [ 1095931487, 1228989455, 0, 0 ], "usage_in_kernelmode": 190000000, "usage_in_usermode": 510000000 }, "system_cpu_usage": 2007130000000, "online_cpus": 2, "throttling_data": { "periods": 0, "throttled_periods": 0, "throttled_time": 0 } } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -831,7 +831,7 @@ mod test { } } } - }"##; + }"#; assert_event_data_eq!( parse(json.as_bytes(), Some(namespace())).unwrap(), @@ -962,7 +962,7 @@ mod test { #[test] fn parse_memory_metrics() { - let json = r##" + let json = r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352": { "read": "2018-11-14T08:09:10.000000011Z", "memory_stats": { "usage": 40120320, "max_usage": 47177728, "stats": { "active_anon": 34885632, "active_file": 65536, "cache": 413696, "dirty": 0, "hierarchical_memory_limit": 536870912, "hierarchical_memsw_limit": 9223372036854771712, "inactive_anon": 4096, "inactive_file": 344064, "mapped_file": 4096, "pgfault": 31131, "pgmajfault": 0, "pgpgin": 22360, "pgpgout": 13742, "rss": 34885632, "rss_huge": 0, "total_active_anon": 34885632, "total_active_file": 65536, "total_cache": 413696, "total_dirty": 0, "total_inactive_anon": 4096, "total_inactive_file": 344064, "total_mapped_file": 4096, "total_pgfault": 31131, "total_pgmajfault": 0, "total_pgpgin": 22360, "total_pgpgout": 13742, "total_rss": 34885632, "total_rss_huge": 0, "total_unevictable": 0, "total_writeback": 0, "unevictable": 0, "writeback": 0 }, "limit": 9223372036854771712 } } - }"##; + }"#; let metrics = parse(json.as_bytes(), Some(namespace())).unwrap(); @@ -1087,7 +1087,7 @@ mod test { #[test] fn parse_network_metrics() { - let json = r##" + let json = r#" { "0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352": { "read": "2018-11-14T08:09:10.000000011Z", "networks": { "eth1": { "rx_bytes": 329932716, "rx_packets": 224158, "rx_errors": 0, "rx_dropped": 0, "tx_bytes": 2001229, "tx_packets": 29201, "tx_errors": 0, "tx_dropped": 0 } } } - }"##; + }"#; let metrics = parse(json.as_bytes(), Some(namespace())).unwrap(); diff --git a/src/sources/exec/mod.rs b/src/sources/exec/mod.rs index bb853545648ad..1b43e05258d6b 100644 --- a/src/sources/exec/mod.rs +++ b/src/sources/exec/mod.rs @@ -192,14 +192,11 @@ const fn default_include_stderr() -> bool { } fn environment_examples() -> HashMap<String, String> { - HashMap::<_, _>::from_iter( - [ - ("LANG".to_owned(), "es_ES.UTF-8".to_owned()), - ("TZ".to_owned(), "Etc/UTC".to_owned()), - ("PATH".to_owned(), "/bin:/usr/bin:/usr/local/bin".to_owned()), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ("LANG".to_owned(), "es_ES.UTF-8".to_owned()), + ("TZ".to_owned(), "Etc/UTC".to_owned()), + ("PATH".to_owned(), "/bin:/usr/bin:/usr/local/bin".to_owned()), + ]) } fn get_hostname() -> Option<String> { diff --git a/src/sources/fluent/mod.rs b/src/sources/fluent/mod.rs index f6ae0955d15eb..d8d559cbefa1b 100644 --- a/src/sources/fluent/mod.rs +++
b/src/sources/fluent/mod.rs @@ -856,7 +856,7 @@ mod tests { #[tokio::test] async fn ack_delivered_without_chunk() { let (result, output) = check_acknowledgements(EventStatus::Delivered, false).await; - assert!(matches!(result, Err(_))); // the `_` inside this error is `Elapsed` + assert!(result.is_err()); // the `_` inside this error is `Elapsed` assert!(output.is_empty()); } diff --git a/src/sources/host_metrics/mod.rs b/src/sources/host_metrics/mod.rs index 18fb45cb91fc8..636169ccbf547 100644 --- a/src/sources/host_metrics/mod.rs +++ b/src/sources/host_metrics/mod.rs @@ -71,7 +71,7 @@ pub enum Collector { /// Filtering configuration. #[configurable_component] #[derive(Clone, Debug, Default)] -pub(self) struct FilterList { +struct FilterList { /// Any patterns which should be included. /// /// The patterns are matched using globbing. @@ -483,7 +483,7 @@ impl MetricsBuffer { } } -pub(self) fn filter_result_sync<T, E>(result: Result<T, E>, message: &'static str) -> Option<T> +fn filter_result_sync<T, E>(result: Result<T, E>, message: &'static str) -> Option<T> where E: std::error::Error, { result .map_err(|error| error!(message, %error, internal_log_rate_limit = true)) .ok() } -pub(self) async fn filter_result<T, E>(result: Result<T, E>, message: &'static str) -> Option<T> +async fn filter_result<T, E>(result: Result<T, E>, message: &'static str) -> Option<T> where E: std::error::Error, { @@ -627,7 +627,7 @@ impl From for String { } #[cfg(test)] -pub(self) mod tests { +mod tests { use crate::test_util::components::{run_and_assert_source_compliance, SOURCE_TAGS}; use std::{collections::HashSet, future::Future, time::Duration}; diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index a6c80782d5f44..1f13c1dc1f490 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -124,37 +124,31 @@ const fn default_http_method() -> HttpMethod { } fn query_examples() -> HashMap<String, Vec<String>> { - HashMap::<_, _>::from_iter( - [ - ("field".to_owned(), vec!["value".to_owned()]), - ( - "fruit".to_owned(), - vec!["mango".to_owned(), "papaya".to_owned(), "kiwi".to_owned()], - ), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ("field".to_owned(), vec!["value".to_owned()]), + ( + "fruit".to_owned(), + vec!["mango".to_owned(), "papaya".to_owned(), "kiwi".to_owned()], + ), + ]) } fn headers_examples() -> HashMap<String, Vec<String>> { - HashMap::<_, _>::from_iter( - [ - ( - "Accept".to_owned(), - vec!["text/plain".to_owned(), "text/html".to_owned()], - ), - ( - "X-My-Custom-Header".to_owned(), - vec![ - "a".to_owned(), - "vector".to_owned(), - "of".to_owned(), - "values".to_owned(), - ], - ), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ( + "Accept".to_owned(), + vec!["text/plain".to_owned(), "text/html".to_owned()], + ), + ( + "X-My-Custom-Header".to_owned(), + vec![ + "a".to_owned(), + "vector".to_owned(), + "of".to_owned(), + "values".to_owned(), + ], + ), + ]) } impl Default for HttpClientConfig { @@ -290,7 +284,7 @@ impl HttpClientContext { loop { match self.decoder.decode_eof(buf) { Ok(Some((next, _))) => { - events.extend(next.into_iter()); + events.extend(next); } Ok(None) => break, Err(error) => { diff --git a/src/sources/http_server.rs b/src/sources/http_server.rs index 0be8b5e4e2117..92507c9b1998a 100644 --- a/src/sources/http_server.rs +++ b/src/sources/http_server.rs @@ -454,7 +454,7 @@ impl HttpSource for SimpleHttpSource { loop { match decoder.decode_eof(&mut bytes) { Ok(Some((next, _))) => { - events.extend(next.into_iter()); + events.extend(next); } Ok(None) => break, Err(error) => { diff --git a/src/sources/journald.rs b/src/sources/journald.rs index
d758067cda8e6..3470e813d62a3 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -209,16 +209,13 @@ const fn default_batch_size() -> usize { } fn matches_examples() -> HashMap<String, Vec<String>> { - HashMap::<_, _>::from_iter( - [ - ( - "_SYSTEMD_UNIT".to_owned(), - vec!["sshd.service".to_owned(), "ntpd.service".to_owned()], - ), - ("_TRANSPORT".to_owned(), vec!["kernel".to_owned()]), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ( + "_SYSTEMD_UNIT".to_owned(), + vec!["sshd.service".to_owned(), "ntpd.service".to_owned()], + ), + ("_TRANSPORT".to_owned(), vec!["kernel".to_owned()]), + ]) } impl JournaldConfig { diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index 56383d9f7b566..91eb05c5ed399 100644 --- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -274,14 +274,11 @@ const fn example_auto_offset_reset_values() -> [&'static str; 7] { } fn example_librdkafka_options() -> HashMap<String, String> { - HashMap::<_, _>::from_iter( - [ - ("client.id".to_string(), "${ENV_VAR}".to_string()), - ("fetch.error.backoff.ms".to_string(), "1000".to_string()), - ("socket.send.buffer.bytes".to_string(), "100".to_string()), - ] - .into_iter(), - ) + HashMap::<_, _>::from_iter([ + ("client.id".to_string(), "${ENV_VAR}".to_string()), + ("fetch.error.backoff.ms".to_string(), "1000".to_string()), + ("socket.send.buffer.bytes".to_string(), "100".to_string()), + ]) } impl_generate_config_from_default!(KafkaSourceConfig); diff --git a/src/sources/prometheus/parser.rs b/src/sources/prometheus/parser.rs index 647571de9e211..36c5d8dc57ed3 100644 --- a/src/sources/prometheus/parser.rs +++ b/src/sources/prometheus/parser.rs @@ -198,10 +198,10 @@ mod test { #[test] fn test_counter_nan() { - let exp = r##" + let exp = r#" # TYPE name counter name{labelname="val1",basename="basevalue"} NaN - "##; + "#; match parse_text(exp).unwrap()[0].value() { MetricValue::Counter { value } => { @@ -213,7 +213,7 @@ mod test { #[test] fn test_counter_weird() { - let exp = r##" + let exp = r#" # A normal comment. # # TYPE name counter @@ -223,7 +223,7 @@ mod test { # TYPE name2 counter name2{labelname="val2" ,basename = "basevalue2" } +Inf 1612411506789 name2{ labelname = "val1" , }-Inf 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -265,12 +265,12 @@ mod test { #[test] fn test_counter_tags_and_timestamp() { - let exp = r##" + let exp = r#" # HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter http_requests_total{method="post",code="200"} 1027 1395066363000 http_requests_total{method="post",code="400"} 3 1395066363000 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -354,9 +354,9 @@ mod test { #[test] fn test_gauge_minimalistic_escaped() { - let exp = r##" + let exp = r#" msdos_file_access_time_seconds{path="C:\\DIR\\FILE.TXT",error="Cannot find file:\n\"FILE.TXT\""} 1.458255915e9 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -377,11 +377,11 @@ mod test { #[test] fn test_tag_value_contain_bracket() { - let exp = r##" + let exp = r#" # HELP name counter # TYPE name counter name{tag="}"} 0 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), Ok(vec![Metric::new( @@ -396,11 +396,11 @@ mod test { #[test] fn test_parse_tag_value_contain_comma() { - let exp = r##" + let exp = r#" # HELP name counter # TYPE name counter name{tag="a,b"} 0 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), Ok(vec![Metric::new( @@ -415,11 +415,11 @@ mod test { #[test] fn test_parse_tag_escaping() { - let exp = r##" + let exp = r#" # HELP name counter # TYPE name counter name{tag="\\n"} 0 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), Ok(vec![Metric::new( @@ -434,11 +434,11 @@ mod test { #[test] fn test_parse_tag_dont_trim_value() { - let exp = r##" + let exp = r#" # HELP name counter # TYPE name counter name{tag=" * "} 0 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), Ok(vec![Metric::new( @@ -453,9 +453,9 @@ mod test { #[test] fn test_parse_tag_value_containing_equals() { - let exp = r##" + let exp = r#" telemetry_scrape_size_bytes_count{registry="default",content_type="text/plain; version=0.0.4"} 1890 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -472,27 +472,27 @@ mod test { #[test] fn test_parse_tag_error_no_value() { - let exp = r##" + let exp = r#" telemetry_scrape_size_bytes_count{registry="default",content_type} 1890 1612411506789 - "##; + "#; assert!(parse_text(exp).is_err()); } #[test] fn test_parse_tag_error_equals_empty_value() { - let exp = r##" + let exp = r#" telemetry_scrape_size_bytes_count{registry="default",content_type=} 1890 1612411506789 - "##; + "#; assert!(parse_text(exp).is_err()); } #[test] fn test_gauge_weird_timestamp() { - let exp = r##" + let exp = r#" something_weird{problem="division by zero"} +Inf -3982045000 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -512,11 +512,11 @@ mod test { #[test] fn test_gauge_tabs() { - let exp = r##" + let exp = r#" # TYPE latency gauge latency{env="production"} 1.0 1395066363000 latency{env="testing"} 2.0 1395066363000 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -577,10 +577,10 @@ mod test { #[test] fn test_no_value() { - let exp = r##" + let exp = r#" # TYPE latency counter latency{env="production"} - "##; + "#; assert!(parse_text(exp).is_err()); } @@ -639,7 +639,7 @@ mod test { #[test] fn test_histogram() { - let exp = r##" + let exp = r#" # HELP http_request_duration_seconds A histogram of the request duration. 
# TYPE http_request_duration_seconds histogram http_request_duration_seconds_bucket{le="0.05"} 24054 1612411506789 @@ -650,7 +650,7 @@ mod test { http_request_duration_seconds_bucket{le="+Inf"} 144320 1612411506789 http_request_duration_seconds_sum 53423 1612411506789 http_request_duration_seconds_count 144320 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -671,14 +671,14 @@ mod test { #[test] fn test_histogram_out_of_order() { - let exp = r##" + let exp = r#" # HELP duration A histogram of the request duration. # TYPE duration histogram duration_bucket{le="+Inf"} 144320 1612411506789 duration_bucket{le="1"} 133988 1612411506789 duration_sum 53423 1612411506789 duration_count 144320 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -697,7 +697,7 @@ mod test { #[test] fn test_histogram_backward_values() { - let exp = r##" + let exp = r#" # HELP duration A histogram of the request duration. # TYPE duration histogram duration_bucket{le="1"} 2000 1612411506789 @@ -705,7 +705,7 @@ mod test { duration_bucket{le="+Inf"} 2000 1612411506789 duration_sum 2000 1612411506789 duration_count 2000 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -724,7 +724,7 @@ mod test { #[test] fn test_histogram_with_labels() { - let exp = r##" + let exp = r#" # HELP gitlab_runner_job_duration_seconds Histogram of job durations # TYPE gitlab_runner_job_duration_seconds histogram gitlab_runner_job_duration_seconds_bucket{runner="z",le="30"} 327 1612411506789 @@ -766,7 +766,7 @@ mod test { gitlab_runner_job_duration_seconds_bucket{runner="y",le="+Inf"} 3255 1612411506789 gitlab_runner_job_duration_seconds_sum{runner="y"} 381111.7498891335 1612411506789 gitlab_runner_job_duration_seconds_count{runner="y"} 3255 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -829,7 +829,7 @@ mod test { #[test] fn test_summary() { - let exp = r##" + let exp = r#" # HELP rpc_duration_seconds A summary of the RPC duration in seconds. # TYPE rpc_duration_seconds summary rpc_duration_seconds{service="a",quantile="0.01"} 3102 1612411506789 @@ -848,7 +848,7 @@ mod test { go_gc_duration_seconds{quantile="1"} 0.018827136 1612411506789 go_gc_duration_seconds_sum 4668.551713715 1612411506789 go_gc_duration_seconds_count 602767 1612411506789 - "##; + "#; assert_event_data_eq!( parse_text(exp), @@ -893,7 +893,7 @@ mod test { // https://github.com/vectordotdev/vector/issues/3276 #[test] fn test_nginx() { - let exp = r##" + let exp = r#" # HELP nginx_server_bytes request/response bytes # TYPE nginx_server_bytes counter nginx_server_bytes{direction="in",host="*"} 263719 @@ -910,7 +910,7 @@ mod test { nginx_server_cache{host="*",status="miss"} 0 nginx_server_cache{host="*",status="revalidated"} 0 nginx_server_cache{host="*",status="scarce"} 0 - "##; + "#; let now = Utc::now(); let result = parse_text(exp).expect("Parsing failed"); diff --git a/src/sources/prometheus/scrape.rs b/src/sources/prometheus/scrape.rs index 408cde50097e9..2d6135348a198 100644 --- a/src/sources/prometheus/scrape.rs +++ b/src/sources/prometheus/scrape.rs @@ -638,7 +638,7 @@ mod test { let make_svc = make_service_fn(|_| async { Ok::<_, Error>(service_fn(|_| async { Ok::<_, Error>(Response::new(Body::from( - r##" + r#" # HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
# TYPE promhttp_metric_handler_requests_total counter promhttp_metric_handler_requests_total{code="200"} 100 1612411516789 @@ -665,7 +665,7 @@ mod test { rpc_duration_seconds{code="200",quantile="0.99"} 76656 1612411516789 rpc_duration_seconds_sum{code="200"} 1.7560473e+07 1612411516789 rpc_duration_seconds_count{code="200"} 2693 1612411516789 - "##, + "#, ))) })) }); diff --git a/src/sources/redis/mod.rs b/src/sources/redis/mod.rs index 1569449ddf29b..9dd343211c9ea 100644 --- a/src/sources/redis/mod.rs +++ b/src/sources/redis/mod.rs @@ -228,7 +228,7 @@ impl SourceConfig for RedisSourceConfig { } } -pub(self) struct InputHandler { +struct InputHandler { pub client: redis::Client, pub bytes_received: Registered<BytesReceived>, pub events_received: Registered<EventsReceived>, diff --git a/src/sources/util/framestream.rs b/src/sources/util/framestream.rs index 287fb0e583585..48555cc908a9e 100644 --- a/src/sources/util/framestream.rs +++ b/src/sources/util/framestream.rs @@ -330,7 +330,7 @@ impl FrameStreamReader { fn send_control_frame(&mut self, frame: Bytes) { let empty_frame = Bytes::from(&b""[..]); //send empty frame to say we are control frame - let mut stream = stream::iter(vec![Ok(empty_frame), Ok(frame)].into_iter()); + let mut stream = stream::iter(vec![Ok(empty_frame), Ok(frame)]); if let Err(e) = block_on(self.response_sink.lock().unwrap().send_all(&mut stream)) { error!("Encountered error '{:#?}' while sending control frame.", e); diff --git a/src/topology/test/backpressure.rs b/src/topology/test/backpressure.rs index 9c17f5dea4d1d..bf16f93611e04 100644 --- a/src/topology/test/backpressure.rs +++ b/src/topology/test/backpressure.rs @@ -11,7 +11,7 @@ use crate::{test_util::mock::backpressure_sink, topology::builder::SOURCE_SENDER // Based on how we pump events from `SourceSender` into `Fanout`, there's always one extra event we // may pull out of `SourceSender` but can't yet send into `Fanout`, so we account for that here. -pub(self) const EXTRA_SOURCE_PUMP_EVENT: usize = 1; +const EXTRA_SOURCE_PUMP_EVENT: usize = 1; /// Connects a single source to a single sink and makes sure the sink backpressure is propagated /// to the source. diff --git a/src/topology/test/end_to_end.rs b/src/topology/test/end_to_end.rs index e9b1f8e499945..75580479ed58c 100644 --- a/src/topology/test/end_to_end.rs +++ b/src/topology/test/end_to_end.rs @@ -116,7 +116,7 @@ uri = "http://{address2}/" // The expected flow is this: // 0. Nothing is ready to continue. - assert!(matches!(rx_server.try_recv(), Err(_))); + assert!(rx_server.try_recv().is_err()); // 1. We send an event to the HTTP source server. let (mut rx_client, sender) = http_client(address1, "test"); @@ -126,11 +126,11 @@ uri = "http://{address2}/" .await .expect("Timed out waiting to receive event from HTTP sink") .expect("Error receiving event from HTTP sink"); - assert!(matches!(rx_client.try_recv(), Err(_))); + assert!(rx_client.try_recv().is_err()); // 3. Our test HTTP server waits for the mutex lock. drop(pause); - assert!(matches!(rx_server.try_recv(), Err(_))); + assert!(rx_server.try_recv().is_err()); // 4. Our test HTTP server responds. // 5. The acknowledgement is returned to the source.
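The repeated `pub(self)` removals in the hunks above (`FilterList`, `InputHandler`, `EXTRA_SOURCE_PUMP_EVENT`, and the various test helpers) line up with clippy's `needless_pub_self` lint, new in Rust 1.72: `pub(self)` grants visibility only within the current module, which is exactly what an unqualified private item already has, so the qualifier is redundant. A small sketch of the equivalence (illustrative only, not code from this patch):

    mod collector {
        // Before: `pub(self) fn helper()` — visible only inside `collector`.
        // After: plain private, which means the same thing.
        fn helper() -> u32 {
            41
        }

        pub fn collect() -> u32 {
            helper() + 1
        }
    }

    fn main() {
        assert_eq!(collector::collect(), 42);
        // `collector::helper()` would not compile here under either form.
    }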
diff --git a/src/transforms/lua/v1/mod.rs b/src/transforms/lua/v1/mod.rs index 2c7dd908cc47f..4da72b6c8e9bd 100644 --- a/src/transforms/lua/v1/mod.rs +++ b/src/transforms/lua/v1/mod.rs @@ -206,7 +206,7 @@ impl TaskTransform for Lua { let mut output = Vec::with_capacity(1); ready(match inner.process(event) { Ok(event) => { - output.extend(event.into_iter()); + output.extend(event); Some(stream::iter(output)) } Err(error) => { diff --git a/vdev/src/app.rs b/vdev/src/app.rs index 3fcf6bcf30764..9deef774f82f7 100644 --- a/vdev/src/app.rs +++ b/vdev/src/app.rs @@ -165,7 +165,9 @@ impl CommandExt for Command { let result = self.output(); progress_bar.finish_and_clear(); - let Ok(output) = result else {bail!("could not run command")}; + let Ok(output) = result else { + bail!("could not run command") + }; if output.status.success() { Ok(()) diff --git a/vdev/src/commands/crate_versions.rs b/vdev/src/commands/crate_versions.rs index a471d24d0032a..f12442967effa 100644 --- a/vdev/src/commands/crate_versions.rs +++ b/vdev/src/commands/crate_versions.rs @@ -22,7 +22,7 @@ pub struct Cli { impl Cli { pub fn exec(self) -> Result<()> { - let re_crate = Regex::new(r#" (\S+) v([0-9.]+)"#).unwrap(); + let re_crate = Regex::new(r" (\S+) v([0-9.]+)").unwrap(); let mut versions: HashMap> = HashMap::default(); for line in Command::new("cargo") diff --git a/vdev/src/commands/test_vrl.rs b/vdev/src/commands/test_vrl.rs index 68a2f810c4920..6644623acef09 100644 --- a/vdev/src/commands/test_vrl.rs +++ b/vdev/src/commands/test_vrl.rs @@ -18,5 +18,5 @@ impl Cli { fn run_tests(path: &[&str]) -> Result<()> { let path: PathBuf = path.iter().collect(); env::set_current_dir(path).context("Could not change directory")?; - app::exec("cargo", ["run"].into_iter(), false) + app::exec("cargo", ["run"], false) } diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index 8dab503a493f8..2beb3fc909d26 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -151,7 +151,7 @@ impl IntegrationTestConfig { let config: Environment = self .matrix .keys() - .zip(product.into_iter()) + .zip(product) .map(|(variable, value)| (variable.clone(), Some(value.clone()))) .collect(); (key, config)