From 2dfb84e425af304b0a08585c7b5b987dd53faee0 Mon Sep 17 00:00:00 2001 From: Rafael RL Date: Fri, 6 Dec 2024 11:26:09 +0100 Subject: [PATCH 1/4] Use samllvec to reduce heap use, use inline functions and improvments --- databroker/Cargo.toml | 1 + databroker/src/broker.rs | 141 ++++++++++++++---- databroker/src/grpc/kuksa_val_v1/val.rs | 17 ++- .../src/grpc/kuksa_val_v2/conversions.rs | 28 ++-- databroker/src/grpc/kuksa_val_v2/val.rs | 68 ++++----- databroker/src/permissions.rs | 8 +- databroker/src/viss/v2/server.rs | 11 +- 7 files changed, 174 insertions(+), 100 deletions(-) diff --git a/databroker/Cargo.toml b/databroker/Cargo.toml index 2cf7f8b6..90ba1f9e 100644 --- a/databroker/Cargo.toml +++ b/databroker/Cargo.toml @@ -62,6 +62,7 @@ lazy_static = "1.4.0" thiserror = "1.0.47" futures = { version = "0.3.28" } async-trait = "0.1.82" +smallvec = "1.13.2" # VISS axum = { version = "0.6.20", optional = true, features = ["ws"] } diff --git a/databroker/src/broker.rs b/databroker/src/broker.rs index 8c777f0e..b24c7035 100644 --- a/databroker/src/broker.rs +++ b/databroker/src/broker.rs @@ -14,6 +14,8 @@ use crate::permissions::{PermissionError, Permissions}; pub use crate::types; +use smallvec::SmallVec; + use crate::query; pub use crate::types::{ChangeType, DataType, DataValue, EntryType}; @@ -22,7 +24,7 @@ use tokio_stream::wrappers::BroadcastStream; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::{Stream, StreamExt}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::convert::TryFrom; use std::sync::atomic::{AtomicI32, Ordering}; use std::sync::Arc; @@ -112,6 +114,10 @@ pub enum Field { ActuatorTarget, MetadataUnit, } +#[derive(Debug, Clone)] +pub struct StackVecField { + pub svec: SmallVec<[Field; 3]>, +} #[derive(Default)] pub struct Database { @@ -142,7 +148,7 @@ pub struct QueryField { pub struct ChangeNotification { pub id: i32, pub update: EntryUpdate, - pub fields: HashSet, + pub fields: StackVecField, } #[derive(Debug, Default, Clone)] @@ -201,7 +207,7 @@ pub struct QuerySubscription { } pub struct ChangeSubscription { - entries: HashMap>, + entries: HashMap, sender: broadcast::Sender, permissions: Permissions, } @@ -233,7 +239,63 @@ pub struct EntryUpdate { pub unit: Option, } +impl StackVecField { + #[inline] + pub fn new() -> Self { + Self { + svec: SmallVec::new(), + } + } + + #[inline] + pub fn push(&mut self, value: Field) { + self.svec.push(value); + } + + #[inline] + pub fn contains(&self, element: &Field) -> bool { + self.svec.contains(element) + } + + #[inline] + pub fn extend_from_stack(&mut self, other: &StackVecField) { + self.svec.extend(other.svec.iter().cloned()); + } + + #[inline] + pub fn with_elements(elements: SmallVec<[Field; 3]>) -> Self { + Self { svec: elements } + } + + #[inline] + pub fn are_disjoint(&self, other: &StackVecField) -> bool { + for item in &self.svec { + if other.svec.contains(item) { + return false; // Found a common element + } + } + true // No common elements found + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.svec.is_empty() + } + + #[inline] + pub fn iter(&self) -> impl Iterator { + self.svec.iter() + } +} + +impl Default for StackVecField { + fn default() -> Self { + Self::new() + } +} + impl Entry { + #[inline] pub fn diff(&self, mut update: EntryUpdate) -> EntryUpdate { if let Some(datapoint) = &update.datapoint { if self.metadata.change_type != ChangeType::Continuous { @@ -710,16 +772,17 @@ impl Entry { self.lag_datapoint = self.datapoint.clone(); } - pub fn apply(&mut 
self, update: EntryUpdate) -> HashSet { - let mut changed = HashSet::new(); + #[inline] + pub fn apply(&mut self, update: EntryUpdate) -> StackVecField { + let mut changed = StackVecField::new(); if let Some(datapoint) = update.datapoint { self.lag_datapoint = self.datapoint.clone(); self.datapoint = datapoint; - changed.insert(Field::Datapoint); + changed.push(Field::Datapoint); } if let Some(actuator_target) = update.actuator_target { self.actuator_target = actuator_target; - changed.insert(Field::ActuatorTarget); + changed.push(Field::ActuatorTarget); } if let Some(updated_allowed) = update.allowed { @@ -755,10 +818,23 @@ impl Subscriptions { pub async fn notify( &self, - changed: Option<&HashMap>>, + changed: Option<&HashMap>, db: &Database, ) -> Result>, NotificationError> { let mut error = None; + + for sub in &self.change_subscriptions { + match sub.notify(changed, db).await { + Ok(_) => {} + Err(err) => error = Some(err), + } + } + + //Leave method here if error is none and query_subscription is empty + if error.is_none() && self.query_subscriptions.is_empty() { + return Ok(None); + } + let mut lag_updates: HashMap = HashMap::new(); for sub in &self.query_subscriptions { match sub.notify(changed, db).await { @@ -774,13 +850,6 @@ impl Subscriptions { } } - for sub in &self.change_subscriptions { - match sub.notify(changed, db).await { - Ok(_) => {} - Err(err) => error = Some(err), - } - } - match error { Some(err) => Err(err), None => { @@ -837,7 +906,7 @@ impl Subscriptions { impl ChangeSubscription { async fn notify( &self, - changed: Option<&HashMap>>, + changed: Option<&HashMap>, db: &Database, ) -> Result<(), NotificationError> { let db_read = db.authorized_read_access(&self.permissions); @@ -846,7 +915,7 @@ impl ChangeSubscription { let mut matches = false; for (id, changed_fields) in changed { if let Some(fields) = self.entries.get(id) { - if !fields.is_disjoint(changed_fields) { + if !fields.are_disjoint(changed_fields) { matches = true; break; } @@ -858,25 +927,25 @@ impl ChangeSubscription { let mut notifications = EntryUpdates::default(); for (id, changed_fields) in changed { if let Some(fields) = self.entries.get(id) { - if !fields.is_disjoint(changed_fields) { + if !fields.are_disjoint(changed_fields) { match db_read.get_entry_by_id(*id) { Ok(entry) => { let mut update = EntryUpdate::default(); - let mut notify_fields = HashSet::new(); + let mut notify_fields = StackVecField::new(); // TODO: Perhaps make path optional update.path = Some(entry.metadata.path.clone()); if changed_fields.contains(&Field::Datapoint) && fields.contains(&Field::Datapoint) { update.datapoint = Some(entry.datapoint.clone()); - notify_fields.insert(Field::Datapoint); + notify_fields.push(Field::Datapoint); } if changed_fields.contains(&Field::ActuatorTarget) && fields.contains(&Field::ActuatorTarget) { update.actuator_target = Some(entry.actuator_target.clone()); - notify_fields.insert(Field::ActuatorTarget); + notify_fields.push(Field::ActuatorTarget); } // fill unit field always update.unit.clone_from(&entry.metadata.unit); @@ -922,16 +991,16 @@ impl ChangeSubscription { match db_read.get_entry_by_id(*id) { Ok(entry) => { let mut update = EntryUpdate::default(); - let mut notify_fields = HashSet::new(); + let mut notify_fields = StackVecField::new(); // TODO: Perhaps make path optional update.path = Some(entry.metadata.path.clone()); if fields.contains(&Field::Datapoint) { update.datapoint = Some(entry.datapoint.clone()); - notify_fields.insert(Field::Datapoint); + 
notify_fields.push(Field::Datapoint); } if fields.contains(&Field::ActuatorTarget) { update.actuator_target = Some(entry.actuator_target.clone()); - notify_fields.insert(Field::ActuatorTarget); + notify_fields.push(Field::ActuatorTarget); } notifications.updates.push(ChangeNotification { id: *id, @@ -989,7 +1058,7 @@ impl QuerySubscription { } fn check_if_changes_match( query: &CompiledQuery, - changed_origin: Option<&HashMap>>, + changed_origin: Option<&HashMap>, db: &DatabaseReadAccess, ) -> bool { match changed_origin { @@ -1039,7 +1108,7 @@ impl QuerySubscription { } fn generate_input( &self, - changed: Option<&HashMap>>, + changed: Option<&HashMap>, db: &DatabaseReadAccess, ) -> Option { let id_used_in_query = QuerySubscription::check_if_changes_match(&self.query, changed, db); @@ -1055,7 +1124,7 @@ impl QuerySubscription { async fn notify( &self, - changed: Option<&HashMap>>, + changed: Option<&HashMap>, db: &Database, ) -> Result, NotificationError> { let db_read = db.authorized_read_access(&self.permissions); @@ -1208,7 +1277,7 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> { &mut self, path: &str, update: EntryUpdate, - ) -> Result, UpdateError> { + ) -> Result { match self.db.path_to_id.get(path) { Some(id) => self.update(*id, update), None => Err(UpdateError::NotFound), @@ -1228,7 +1297,7 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> { } } - pub fn update(&mut self, id: i32, update: EntryUpdate) -> Result, UpdateError> { + pub fn update(&mut self, id: i32, update: EntryUpdate) -> Result { match self.db.entries.get_mut(&id) { Some(entry) => { if update.path.is_some() @@ -1569,7 +1638,7 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> { let cleanup_needed = { let changed = { - let mut changed = HashMap::>::new(); + let mut changed = HashMap::::new(); for (id, update) in updates { debug!("setting id {} to {:?}", id, update); match db_write.update(id, update) { @@ -1631,7 +1700,7 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> { pub async fn subscribe( &self, - valid_entries: HashMap>, + valid_entries: HashMap, buffer_size: Option, ) -> Result, SubscriptionError> { if valid_entries.is_empty() { @@ -4262,7 +4331,10 @@ pub mod tests { let mut stream = broker .subscribe( - HashMap::from([(id1, HashSet::from([Field::Datapoint]))]), + HashMap::from([( + id1, + StackVecField::with_elements(smallvec::smallvec![Field::Datapoint]), + )]), buffer_size, ) .await @@ -4371,7 +4443,10 @@ pub mod tests { match broker .subscribe( - HashMap::from([(id1, HashSet::from([Field::Datapoint]))]), + HashMap::from([( + id1, + StackVecField::with_elements(smallvec::smallvec![Field::Datapoint]), + )]), // 1001 is just outside valid range 0-1000 Some(1001), ) diff --git a/databroker/src/grpc/kuksa_val_v1/val.rs b/databroker/src/grpc/kuksa_val_v1/val.rs index 1ccc8995..1ca06573 100644 --- a/databroker/src/grpc/kuksa_val_v1/val.rs +++ b/databroker/src/grpc/kuksa_val_v1/val.rs @@ -29,6 +29,7 @@ use tracing::info; use crate::broker; use crate::broker::ReadError; +use crate::broker::StackVecField; use crate::broker::SubscriptionError; use crate::broker::{AuthorizedAccess, EntryReadAccess}; use crate::glob::Matcher; @@ -494,7 +495,7 @@ impl proto::val_server::Val for broker::DataBroker { )); } - let mut valid_requests: HashMap)> = HashMap::new(); + let mut valid_requests: HashMap = HashMap::new(); for entry in &request.entries { if entry.path.len() > MAX_REQUEST_PATH_LENGTH { @@ -507,18 +508,18 @@ impl proto::val_server::Val for broker::DataBroker { match Matcher::new(&entry.path) { Ok(matcher) => { - let mut fields = HashSet::new(); + 
let mut fields = StackVecField::new(); for id in &entry.fields { if let Ok(field) = proto::Field::try_from(*id) { match field { proto::Field::Value => { - fields.insert(broker::Field::Datapoint); + fields.push(broker::Field::Datapoint); } proto::Field::ActuatorTarget => { - fields.insert(broker::Field::ActuatorTarget); + fields.push(broker::Field::ActuatorTarget); } proto::Field::MetadataUnit => { - fields.insert(broker::Field::MetadataUnit); + fields.push(broker::Field::MetadataUnit); } _ => { // Just ignore other fields for now @@ -535,7 +536,7 @@ impl proto::val_server::Val for broker::DataBroker { } } - let mut entries: HashMap> = HashMap::new(); + let mut entries: HashMap = HashMap::new(); if !valid_requests.is_empty() { for (path, (matcher, fields)) in valid_requests { @@ -549,7 +550,7 @@ impl proto::val_server::Val for broker::DataBroker { entries .entry(entry.metadata().id) .and_modify(|existing_fields| { - existing_fields.extend(fields.clone()); + existing_fields.extend_from_stack(&fields); }) .or_insert(fields.clone()); @@ -575,7 +576,7 @@ impl proto::val_server::Val for broker::DataBroker { entries .entry(entry.metadata().id) .and_modify(|existing_fields| { - existing_fields.extend(fields.clone()); + existing_fields.extend_from_stack(&fields); }) .or_insert(fields.clone()); diff --git a/databroker/src/grpc/kuksa_val_v2/conversions.rs b/databroker/src/grpc/kuksa_val_v2/conversions.rs index e632e4f7..67d69a62 100644 --- a/databroker/src/grpc/kuksa_val_v2/conversions.rs +++ b/databroker/src/grpc/kuksa_val_v2/conversions.rs @@ -22,32 +22,26 @@ use std::time::SystemTime; use tracing::debug; impl From<&proto::Datapoint> for broker::Datapoint { + #[inline] fn from(datapoint: &proto::Datapoint) -> Self { let value = broker::DataValue::from(datapoint); let ts = SystemTime::now(); - match &datapoint.timestamp { - Some(source_timestamp) => { - let source: Option = match source_timestamp.clone().try_into() { - Ok(source) => Some(source), - Err(_) => None, - }; - broker::Datapoint { - ts, - source_ts: source, - value, - } - } - None => broker::Datapoint { - ts, - source_ts: None, - value, - }, + let source_ts = datapoint + .timestamp + .as_ref() + .and_then(|source_timestamp| source_timestamp.clone().try_into().ok()); + + broker::Datapoint { + ts, + source_ts, + value, } } } impl From for Option { + #[inline] fn from(from: broker::Datapoint) -> Self { match from.value { broker::DataValue::NotAvailable => Some(proto::Datapoint { diff --git a/databroker/src/grpc/kuksa_val_v2/val.rs b/databroker/src/grpc/kuksa_val_v2/val.rs index 809fae4f..e2d2a738 100644 --- a/databroker/src/grpc/kuksa_val_v2/val.rs +++ b/databroker/src/grpc/kuksa_val_v2/val.rs @@ -15,7 +15,8 @@ use std::{collections::HashMap, pin::Pin}; use crate::{ broker::{ - self, ActuationChange, ActuationProvider, AuthorizedAccess, ReadError, SubscriptionError, + self, ActuationChange, ActuationProvider, AuthorizedAccess, ReadError, StackVecField, + SubscriptionError, }, glob::Matcher, permissions::Permissions, @@ -34,7 +35,6 @@ use kuksa::proto::v2::{ signal_id, ActuateRequest, ActuateResponse, BatchActuateStreamRequest, ErrorCode, ListMetadataResponse, ProvideActuationResponse, }; -use std::collections::HashSet; use tokio::{select, sync::mpsc}; use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt}; use tracing::debug; @@ -228,7 +228,7 @@ impl proto::val_server::Val for broker::DataBroker { let signal_paths = request.signal_paths; let size = signal_paths.len(); - let mut valid_requests: HashMap> = HashMap::with_capacity(size); 
+ let mut valid_requests: HashMap = HashMap::with_capacity(size); for path in signal_paths { valid_requests.insert( @@ -243,7 +243,7 @@ impl proto::val_server::Val for broker::DataBroker { Ok(signal_id) => signal_id, Err(err) => return Err(err), }, - vec![broker::Field::Datapoint].into_iter().collect(), + StackVecField::with_elements(smallvec::smallvec![broker::Field::Datapoint]), ); } @@ -301,7 +301,7 @@ impl proto::val_server::Val for broker::DataBroker { let signal_ids = request.signal_ids; let size = signal_ids.len(); - let mut valid_requests: HashMap> = HashMap::with_capacity(size); + let mut valid_requests: HashMap = HashMap::with_capacity(size); for id in signal_ids { valid_requests.insert( @@ -316,7 +316,7 @@ impl proto::val_server::Val for broker::DataBroker { Ok(signal_id) => signal_id, Err(err) => return Err(err), }, - vec![broker::Field::Datapoint].into_iter().collect(), + StackVecField::with_elements(smallvec::smallvec![broker::Field::Datapoint]), ); } @@ -653,13 +653,6 @@ impl proto::val_server::Val for broker::DataBroker { match request { Some(req) => { match req.action { - Some(ProvideActuationRequest(provided_actuation)) => { - let response = provide_actuation(&broker, &provided_actuation, response_stream_sender.clone()).await; - if let Err(err) = response_stream_sender.send(response).await - { - debug!("Failed to send response: {}", err) - } - }, Some(PublishValuesRequest(publish_values_request)) => { let response = publish_values(&broker, &publish_values_request).await; if let Some(value) = response { @@ -668,6 +661,13 @@ impl proto::val_server::Val for broker::DataBroker { } } }, + Some(ProvideActuationRequest(provided_actuation)) => { + let response = provide_actuation(&broker, &provided_actuation, response_stream_sender.clone()).await; + if let Err(err) = response_stream_sender.send(response).await + { + debug!("Failed to send response: {}", err) + } + }, Some(BatchActuateStreamResponse(batch_actuate_stream_response)) => { if let Some(error) = batch_actuate_stream_response.error { @@ -804,31 +804,29 @@ async fn provide_actuation( } } +#[inline] async fn publish_values( broker: &AuthorizedAccess<'_, '_>, request: &databroker_proto::kuksa::val::v2::PublishValuesRequest, ) -> Option { - let ids: Vec<(i32, broker::EntryUpdate)> = request - .data_points - .iter() - .map(|(id, datapoint)| { - ( - *id, - broker::EntryUpdate { - path: None, - datapoint: Some(broker::Datapoint::from(datapoint)), - actuator_target: None, - entry_type: None, - data_type: None, - description: None, - allowed: None, - min: None, - max: None, - unit: None, - }, - ) - }) - .collect(); + let mut ids = Vec::with_capacity(request.data_points.len()); + ids.extend(request.data_points.iter().map(|(id, datapoint)| { + ( + *id, + broker::EntryUpdate { + path: None, + datapoint: Some(broker::Datapoint::from(datapoint)), + actuator_target: None, + entry_type: None, + data_type: None, + description: None, + allowed: None, + min: None, + max: None, + unit: None, + }, + ) + })); // TODO check if provider is allowed to update the entries for the provided signals? 
match broker.update_entries(ids).await { @@ -849,6 +847,7 @@ async fn publish_values( } } +#[inline] async fn get_signal( signal_id: Option, broker: &AuthorizedAccess<'_, '_>, @@ -902,6 +901,7 @@ fn convert_to_proto_stream( }) } +#[inline] fn convert_to_proto_stream_id( input: impl Stream, size: usize, diff --git a/databroker/src/permissions.rs b/databroker/src/permissions.rs index 8157b811..8d4e1ec2 100644 --- a/databroker/src/permissions.rs +++ b/databroker/src/permissions.rs @@ -223,12 +223,8 @@ impl Permissions { #[inline] pub fn is_expired(&self) -> bool { - if let Some(expires_at) = self.expires_at { - if expires_at < SystemTime::now() { - return true; - } - } - false + self.expires_at + .map_or(false, |expires_at| expires_at < SystemTime::now()) } } diff --git a/databroker/src/viss/v2/server.rs b/databroker/src/viss/v2/server.rs index 476670c5..7da1d6c4 100644 --- a/databroker/src/viss/v2/server.rs +++ b/databroker/src/viss/v2/server.rs @@ -12,13 +12,15 @@ ********************************************************************************/ use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, convert::TryFrom, pin::Pin, sync::Arc, time::SystemTime, }; +use crate::broker::StackVecField; + use futures::{ stream::{AbortHandle, Abortable}, Stream, StreamExt, @@ -262,7 +264,12 @@ impl Viss for Server { let Some(entries) = broker .get_id_by_path(request.path.as_ref()) .await - .map(|id| HashMap::from([(id, HashSet::from([broker::Field::Datapoint]))])) + .map(|id| { + HashMap::from([( + id, + StackVecField::with_elements(smallvec::smallvec![broker::Field::Datapoint]), + )]) + }) else { return Err(SubscribeErrorResponse { request_id, From 4de7330621ba247ad087685c56df53b6530db876 Mon Sep 17 00:00:00 2001 From: Rafael RL Date: Fri, 6 Dec 2024 11:32:31 +0100 Subject: [PATCH 2/4] fmt --- databroker/src/viss/v2/server.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/databroker/src/viss/v2/server.rs b/databroker/src/viss/v2/server.rs index 7da1d6c4..5296fe5a 100644 --- a/databroker/src/viss/v2/server.rs +++ b/databroker/src/viss/v2/server.rs @@ -11,13 +11,7 @@ * SPDX-License-Identifier: Apache-2.0 ********************************************************************************/ -use std::{ - collections::HashMap, - convert::TryFrom, - pin::Pin, - sync::Arc, - time::SystemTime, -}; +use std::{collections::HashMap, convert::TryFrom, pin::Pin, sync::Arc, time::SystemTime}; use crate::broker::StackVecField; From 43c0e4792ac5bdb1d53423bcf917dabccd1f9b3b Mon Sep 17 00:00:00 2001 From: Rafael RL Date: Fri, 6 Dec 2024 11:48:04 +0100 Subject: [PATCH 3/4] Avoid clonning of metadata which is not used --- databroker/src/broker.rs | 14 ++++++++++++++ databroker/src/grpc/kuksa_val_v2/val.rs | 6 +++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/databroker/src/broker.rs b/databroker/src/broker.rs index b24c7035..0a7bbc75 100644 --- a/databroker/src/broker.rs +++ b/databroker/src/broker.rs @@ -1259,6 +1259,11 @@ impl<'a, 'b> DatabaseReadAccess<'a, 'b> { self.db.entries.get(&id).map(|entry| &entry.metadata) } + #[inline] + pub fn contains_id(&self, id: i32) -> bool { + self.db.entries.contains_key(&id) + } + pub fn get_metadata_by_path(&self, path: &str) -> Option<&Metadata> { let id = self.db.path_to_id.get(path)?; self.get_metadata_by_id(*id) @@ -1562,6 +1567,15 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> { .cloned() } + pub async fn contains_id(&self, id: i32) -> bool { + self.broker + .database + .read() + .await + 
.authorized_read_access(self.permissions) + .contains_id(id) + } + pub async fn get_metadata_by_path(&self, path: &str) -> Option { self.broker .database diff --git a/databroker/src/grpc/kuksa_val_v2/val.rs b/databroker/src/grpc/kuksa_val_v2/val.rs index e2d2a738..3a4a85e6 100644 --- a/databroker/src/grpc/kuksa_val_v2/val.rs +++ b/databroker/src/grpc/kuksa_val_v2/val.rs @@ -865,9 +865,9 @@ async fn get_signal( None => Err(tonic::Status::not_found("Path not found")), } } - proto::signal_id::Signal::Id(id) => match broker.get_metadata(id).await { - Some(_metadata) => Ok(id), - None => Err(tonic::Status::not_found("Path not found")), + proto::signal_id::Signal::Id(id) => match broker.contains_id(id).await { + true => Ok(id), + false => Err(tonic::Status::not_found("Path not found")), }, } } else { From 33e0d0b74784e82e8a241e8baa4258b741036402 Mon Sep 17 00:00:00 2001 From: Rafael RL Date: Mon, 9 Dec 2024 04:22:08 +0100 Subject: [PATCH 4/4] Add inline --- databroker/src/permissions.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/databroker/src/permissions.rs b/databroker/src/permissions.rs index 8d4e1ec2..389ae6ee 100644 --- a/databroker/src/permissions.rs +++ b/databroker/src/permissions.rs @@ -229,6 +229,7 @@ impl Permissions { } impl PathMatcher { + #[inline] pub fn is_match(&self, path: &str) -> bool { match self { PathMatcher::Nothing => false,
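
Background sketch on the SmallVec change: the broker only ever tracks the three
Field variants (Datapoint, ActuatorTarget, MetadataUnit) per entry, so a
SmallVec<[Field; 3]> can hold any field set in its inline buffer and the
notification path avoids the per-entry HashSet<Field> heap allocations it had
before. The following is a minimal, self-contained illustration of that pattern,
using the smallvec = "1.13.2" dependency added in patch 1/4. It mirrors the
patch's StackVecField API but is a standalone sketch, not the databroker code
itself; the duplicate check in push() is an addition here to make the set-like
semantics explicit, and main() exists only to exercise the type.

use smallvec::SmallVec;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Field {
    Datapoint,
    ActuatorTarget,
    MetadataUnit,
}

/// Set of changed/subscribed fields, backed by a SmallVec whose inline
/// capacity of 3 covers every possible combination of Field variants.
#[derive(Debug, Clone, Default)]
struct StackVecField {
    svec: SmallVec<[Field; 3]>,
}

impl StackVecField {
    fn new() -> Self {
        Self { svec: SmallVec::new() }
    }

    /// Insert a field; skips duplicates so the vector behaves like a set.
    fn push(&mut self, field: Field) {
        if !self.svec.contains(&field) {
            self.svec.push(field);
        }
    }

    fn contains(&self, field: &Field) -> bool {
        self.svec.contains(field)
    }

    /// A linear scan is cheap for at most three elements; this stands in for
    /// HashSet::is_disjoint in the subscription matching path.
    fn are_disjoint(&self, other: &StackVecField) -> bool {
        self.svec.iter().all(|field| !other.svec.contains(field))
    }
}

fn main() {
    let mut changed = StackVecField::new();
    changed.push(Field::Datapoint);

    let mut subscribed = StackVecField::new();
    subscribed.push(Field::ActuatorTarget);
    subscribed.push(Field::MetadataUnit);

    // With at most three distinct fields, the inline buffer is never exceeded,
    // so neither set has spilled to the heap.
    assert!(!changed.svec.spilled());
    assert!(!subscribed.svec.spilled());
    assert!(changed.contains(&Field::Datapoint));
    assert!(changed.are_disjoint(&subscribed));
    println!("changed = {:?}", changed);
}

Compared to HashSet<Field>, this sketch keeps insertion order, avoids hashing,
and only falls back to a heap allocation if more than three distinct elements
are ever pushed, which cannot happen with the current Field enum.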