From dc8d7489c9b9585aaf2c781aeeb598c9534ed9d8 Mon Sep 17 00:00:00 2001 From: terassyi Date: Wed, 29 May 2024 23:51:28 +0900 Subject: [PATCH] Refactor log Signed-off-by: terassyi --- e2e/topology/kubernetes-cni-compact.yaml | 208 ++++++++++++++++++ sartd/src/bgp/src/api_server.rs | 2 - sartd/src/bgp/src/peer/peer.rs | 3 - sartd/src/bgp/src/server.rs | 7 +- sartd/src/kubernetes/Cargo.toml | 2 +- .../src/agent/reconciler/bgp_advertisement.rs | 17 +- .../src/agent/reconciler/bgp_peer.rs | 32 +-- .../src/agent/reconciler/node_bgp.rs | 37 ++-- .../controller/reconciler/address_block.rs | 23 +- .../src/controller/reconciler/address_pool.rs | 12 +- .../reconciler/bgp_advertisement.rs | 22 +- .../src/controller/reconciler/cluster_bgp.rs | 24 +- .../reconciler/endpointslice_watcher.rs | 20 +- .../src/controller/reconciler/node_watcher.rs | 11 +- .../controller/reconciler/service_watcher.rs | 46 ++-- 15 files changed, 325 insertions(+), 141 deletions(-) create mode 100644 e2e/topology/kubernetes-cni-compact.yaml diff --git a/e2e/topology/kubernetes-cni-compact.yaml b/e2e/topology/kubernetes-cni-compact.yaml new file mode 100644 index 0000000..fabc4e8 --- /dev/null +++ b/e2e/topology/kubernetes-cni-compact.yaml @@ -0,0 +1,208 @@ +name: sart +topology: + kinds: + linux: + cmd: bash + nodes: + router0: + kind: linux + image: frrouting/frr:v8.4.0 + exec: + - ip addr add 169.254.1.1/24 dev net0 scope link + - ip addr add 169.254.2.1/24 dev net1 scope link + - ip addr add 169.254.3.1/24 dev net2 scope link + - ip addr add 169.254.4.1/24 dev net3 scope link + - ip addr add 192.168.0.1/24 dev net4 scope link + # route for node primary address + - ip route add 172.18.0.6/32 dev net0 + - ip route add 172.18.0.4/32 dev net1 + - ip route add 172.18.0.5/32 dev net2 + - ip route add 172.18.0.3/32 dev net3 + - ip link add dummy0 type dummy + - ip addr add 9.9.9.9/32 dev dummy0 + - ip link set up dev dummy0 + - sysctl -w net.ipv4.fib_multipath_hash_policy=1 + - sysctl -p + # Boiler plate to make FRR work + - touch /etc/frr/vtysh.conf + - sed -i -e 's/bgpd=no/bgpd=yes/g' /etc/frr/daemons + - /usr/lib/frr/frrinit.sh start + # FRR configuration + - >- + vtysh -c 'conf t' + -c 'frr defaults datacenter' + -c '!' + -c 'ip prefix-list LAB-SUBNET permit 10.0.0.0/8 ge 8' + -c '!' + -c 'router bgp 65000' + -c ' bgp router-id 9.9.9.9' + -c ' bgp bestpath as-path multipath-relax' + -c ' neighbor 169.254.1.2 remote-as 65010' + -c ' neighbor 169.254.2.2 remote-as 65020' + -c ' neighbor 169.254.3.2 remote-as 65030' + -c ' neighbor 169.254.4.2 remote-as 65040' + -c ' neighbor 169.254.1.2 update-source dummy0' + -c ' neighbor 169.254.2.2 update-source dummy0' + -c ' neighbor 169.254.3.2 update-source dummy0' + -c ' neighbor 169.254.4.2 update-source dummy0' + -c ' neighbor 169.254.1.2 next-hop-self' + -c ' neighbor 169.254.2.2 next-hop-self' + -c ' neighbor 169.254.3.2 next-hop-self' + -c ' neighbor 169.254.4.2 next-hop-self' + -c '!' 
+ router0-debug: + kind: linux + image: nicolaka/netshoot:latest + network-mode: container:clab-sart-router0 + router1: + kind: linux + image: frrouting/frr:v8.4.0 + exec: + - ip addr add 169.253.1.1/24 dev net0 scope link + - ip addr add 169.253.2.1/24 dev net1 scope link + - ip addr add 169.253.3.1/24 dev net2 scope link + - ip addr add 169.253.4.1/24 dev net3 scope link + - ip addr add 192.168.1.1/24 dev net4 scope link + - ip link add dummy0 type dummy + - ip addr add 7.7.7.7/32 dev dummy0 + - ip link set up dev dummy0 + - ip route add 172.18.0.6/32 dev net0 + - ip route add 172.18.0.4/32 dev net1 + - ip route add 172.18.0.5/32 dev net2 + - ip route add 172.18.0.3/32 dev net3 + - sysctl -w net.ipv4.fib_multipath_hash_policy=1 + - sysctl -p + # Boiler plate to make FRR work + - touch /etc/frr/vtysh.conf + - sed -i -e 's/bgpd=no/bgpd=yes/g' /etc/frr/daemons + - /usr/lib/frr/frrinit.sh start + # FRR configuration + - >- + vtysh -c 'conf t' + -c 'frr defaults datacenter' + -c '!' + -c 'ip prefix-list LAB-SUBNET permit 10.0.0.0/8 ge 8' + -c '!' + -c 'router bgp 65000' + -c ' bgp router-id 7.7.7.7' + -c ' bgp bestpath as-path multipath-relax' + -c ' neighbor 169.253.1.2 remote-as 65010' + -c ' neighbor 169.253.2.2 remote-as 65020' + -c ' neighbor 169.253.3.2 remote-as 65030' + -c ' neighbor 169.253.4.2 remote-as 65040' + -c ' neighbor 169.253.1.2 update-source dummy0' + -c ' neighbor 169.253.2.2 update-source dummy0' + -c ' neighbor 169.253.3.2 update-source dummy0' + -c ' neighbor 169.253.4.2 update-source dummy0' + -c ' neighbor 169.253.1.2 next-hop-self' + -c ' neighbor 169.253.2.2 next-hop-self' + -c ' neighbor 169.253.3.2 next-hop-self' + -c ' neighbor 169.253.4.2 next-hop-self' + -c '!' + router1-debug: + kind: linux + image: nicolaka/netshoot:latest + network-mode: container:clab-sart-router1 + control-plane0: + kind: linux + image: nicolaka/netshoot:latest + network-mode: container:sart-control-plane + exec: + # Enable ECMP + - sysctl -w net.ipv4.fib_multipath_hash_policy=1 + - sysctl -p + # Address for peering + - ip addr add 169.254.1.2/24 dev net0 scope link + - ip addr add 169.253.1.2/24 dev net1 scope link + # Route traffic to the lab through router + - ip route add 172.18.0.4/32 src 172.18.0.6 nexthop via 169.254.1.1 weight 1 nexthop via 169.253.1.1 weight 1 + - ip route add 172.18.0.5/32 src 172.18.0.6 nexthop via 169.254.1.1 weight 1 nexthop via 169.253.1.1 weight 1 + - ip route add 172.18.0.3/32 src 172.18.0.6 nexthop via 169.254.1.1 weight 1 nexthop via 169.253.1.1 weight 1 + - ip route add 192.168.0.0/24 via 169.254.1.1 dev net0 + - ip route add 192.168.1.0/24 via 169.253.1.1 dev net1 + - ip route add 6.6.6.6/32 via 169.254.1.1 dev net0 + - ip route add 9.9.9.9/32 via 169.254.1.1 dev net0 + - ip route add 7.7.7.7/32 via 169.253.1.1 dev net1 + worker0: + kind: linux + image: nicolaka/netshoot:latest + network-mode: container:sart-worker + exec: + # Enable ECMP + - sysctl -w net.ipv4.fib_multipath_hash_policy=1 + - sysctl -p + # Address for peering + - ip addr add 169.254.2.2/24 dev net0 scope link + - ip addr add 169.253.2.2/24 dev net1 scope link + # Route traffic to the lab through router + - ip route add 172.18.0.6/32 src 172.18.0.4 nexthop via 169.254.2.1 weight 1 nexthop via 169.253.2.1 weight 1 + - ip route add 172.18.0.5/32 src 172.18.0.4 nexthop via 169.254.2.1 weight 1 nexthop via 169.253.2.1 weight 1 + - ip route add 172.18.0.3/32 src 172.18.0.4 nexthop via 169.254.2.1 weight 1 nexthop via 169.253.2.1 weight 1 + - ip route add 192.168.0.0/24 via 169.254.2.1 dev 
net0
+        - ip route add 192.168.1.0/24 via 169.253.2.1 dev net1
+        - ip route add 6.6.6.6/32 via 169.254.2.1 dev net0
+        - ip route add 9.9.9.9/32 via 169.254.2.1 dev net0
+        - ip route add 7.7.7.7/32 via 169.253.2.1 dev net1
+    worker1:
+      kind: linux
+      image: nicolaka/netshoot:latest
+      network-mode: container:sart-worker2
+      exec:
+        # Enable ECMP
+        - sysctl -w net.ipv4.fib_multipath_hash_policy=1
+        - sysctl -p
+        # Address for peering
+        - ip addr add 169.254.3.2/24 dev net0 scope link
+        - ip addr add 169.253.3.2/24 dev net1 scope link
+        # Route traffic to the lab through router
+        - ip route add 172.18.0.6/32 src 172.18.0.5 nexthop via 169.254.3.1 weight 1 nexthop via 169.253.3.1 weight 1
+        - ip route add 172.18.0.4/32 src 172.18.0.5 nexthop via 169.254.3.1 weight 1 nexthop via 169.253.3.1 weight 1
+        - ip route add 172.18.0.3/32 src 172.18.0.5 nexthop via 169.254.3.1 weight 1 nexthop via 169.253.3.1 weight 1
+        - ip route add 192.168.0.0/24 via 169.254.3.1 dev net0
+        - ip route add 192.168.1.0/24 via 169.253.3.1 dev net1
+        - ip route add 6.6.6.6/32 via 169.254.3.1 dev net0
+        - ip route add 9.9.9.9/32 via 169.254.3.1 dev net0
+        - ip route add 7.7.7.7/32 via 169.253.3.1 dev net1
+    worker2:
+      kind: linux
+      image: nicolaka/netshoot:latest
+      network-mode: container:sart-worker3
+      exec:
+        # Enable ECMP
+        - sysctl -w net.ipv4.fib_multipath_hash_policy=1
+        - sysctl -p
+        # Address for peering
+        - ip addr add 169.254.4.2/24 dev net0 scope link
+        - ip addr add 169.253.4.2/24 dev net1 scope link
+        # Route traffic to the lab through router
+        - ip route add 172.18.0.6/32 src 172.18.0.3 nexthop via 169.254.4.1 weight 1 nexthop via 169.253.4.1 weight 1
+        - ip route add 172.18.0.4/32 src 172.18.0.3 nexthop via 169.254.4.1 weight 1 nexthop via 169.253.4.1 weight 1
+        - ip route add 172.18.0.5/32 src 172.18.0.3 nexthop via 169.254.4.1 weight 1 nexthop via 169.253.4.1 weight 1
+        - ip route add 192.168.0.0/24 via 169.254.4.1 dev net0
+        - ip route add 192.168.1.0/24 via 169.253.4.1 dev net1
+        - ip route add 6.6.6.6/32 via 169.254.4.1 dev net0
+        - ip route add 9.9.9.9/32 via 169.254.4.1 dev net0
+        - ip route add 7.7.7.7/32 via 169.253.4.1 dev net1
+    client0:
+      kind: linux
+      image: nicolaka/netshoot:latest
+      exec:
+        - ip addr add 192.168.0.2/24 dev net0
+        - ip addr add 192.168.1.2/24 dev net1
+        - ip link add dummy0 type dummy
+        - ip addr add 6.6.6.6/32 dev dummy0
+        - ip link set up dev dummy0
+        - ip route change default src 6.6.6.6 nexthop via 192.168.0.1 weight 1
+        - sysctl -w net.ipv4.fib_multipath_hash_policy=1
+        - sysctl -p
+  links:
+    - endpoints: ["router0:net0", "control-plane0:net0"]
+    - endpoints: ["router0:net1", "worker0:net0"]
+    - endpoints: ["router0:net2", "worker1:net0"]
+    - endpoints: ["router0:net3", "worker2:net0"]
+    - endpoints: ["router1:net0", "control-plane0:net1"]
+    - endpoints: ["router1:net1", "worker0:net1"]
+    - endpoints: ["router1:net2", "worker1:net1"]
+    - endpoints: ["router1:net3", "worker2:net1"]
+    - endpoints: ["router0:net4", "client0:net0"]
+    - endpoints: ["router1:net4", "client0:net1"]
diff --git a/sartd/src/bgp/src/api_server.rs b/sartd/src/bgp/src/api_server.rs
index f245594..5996219 100644
--- a/sartd/src/bgp/src/api_server.rs
+++ b/sartd/src/bgp/src/api_server.rs
@@ -52,11 +52,9 @@ impl ApiServer {
 #[tonic::async_trait]
 impl BgpApi for ApiServer {
     async fn health(&self, _req: Request<()>) -> Result<Response<()>, Status> {
-        tracing::info!("start health checking");
         let guard_tx = self.tx.lock().await;
         guard_tx.send(ControlEvent::Health).await.unwrap();
         self.signal.notified().await;
-        tracing::info!("health checking");
         Ok(Response::new(()))
     }
 
diff --git a/sartd/src/bgp/src/peer/peer.rs b/sartd/src/bgp/src/peer/peer.rs
index 8220750..8770987 100644
--- a/sartd/src/bgp/src/peer/peer.rs
+++ b/sartd/src/bgp/src/peer/peer.rs
@@ -152,7 +152,6 @@ impl Peer {
         };
         let exporter = match exporter {
             Some(path) => {
-                tracing::info!("Try to connect to BGP exporter");
                 match sartd_proto::sart::bgp_exporter_api_client::BgpExporterApiClient::connect(
                     format!("http://{}", path),
                 )
@@ -244,8 +243,6 @@ impl Peer {
 
         let (msg_event_tx, mut msg_event_rx) = unbounded_channel::();
 
-        tracing::debug!("handlng the peer event");
-
         let (conn_close_tx, mut conn_close_rx) = channel::(2);
 
         loop {
diff --git a/sartd/src/bgp/src/server.rs b/sartd/src/bgp/src/server.rs
index 44e8df5..0ee956b 100644
--- a/sartd/src/bgp/src/server.rs
+++ b/sartd/src/bgp/src/server.rs
@@ -228,7 +228,6 @@ impl Bgp {
 
     #[tracing::instrument(skip(self, event))]
     async fn handle_event(&mut self, event: ControlEvent) -> Result<(), Error> {
-        tracing::info!(event=%event);
         match event {
             ControlEvent::Health => {}
             ControlEvent::GetBgpInfo => self.get_bgp_info().await?,
@@ -251,7 +250,7 @@ impl Bgp {
             }
             ControlEvent::DeletePath(family, prefixes) => {
                 self.delete_path(family, prefixes).await?;
-            },
+            }
             ControlEvent::ConfigureMultiPath(enable) => {
                 self.set_multipath(enable).await?;
             }
@@ -370,7 +369,7 @@ impl Bgp {
                 }
             }
         }
-        tracing::info!("set local asn");
+        tracing::info!(asn = asn, "set local asn");
         self.rib_event_tx
             .send(RibEvent::SetAsn(asn))
             .await
@@ -388,7 +387,7 @@ impl Bgp {
             return Ok(());
         }
         config.router_id = router_id;
-        tracing::info!("set local router_id");
+        tracing::info!(router_id =? router_id, "set local router_id");
         self.rib_event_tx
             .send(RibEvent::SetRouterId(router_id))
             .await
diff --git a/sartd/src/kubernetes/Cargo.toml b/sartd/src/kubernetes/Cargo.toml
index a9a73fd..8efc962 100644
--- a/sartd/src/kubernetes/Cargo.toml
+++ b/sartd/src/kubernetes/Cargo.toml
@@ -21,7 +21,6 @@ serde_json = "1.0.108"
 thiserror = "1.0.53"
 tokio = { version = "1.35.1", features = ["rt-multi-thread", "macros"] }
 tracing = "0.1.40"
-sartd-trace = { path = "../trace" }
 serde_yaml = "0.9.29"
 tonic = "0.10.2"
 
@@ -29,6 +28,7 @@ sartd-cert = { path = "../cert" }
 sartd-proto = { path = "../proto" }
 sartd-ipam = { path = "../ipam" }
 sartd-mock = { path = "../mock" }
+sartd-trace = { path = "../trace" }
 futures = "0.3.30"
 rtnetlink = "0.13.1" # Ignore v0.14.x
 actix-web = { version = "4.4.1", features = ["rustls-0_21"] }
diff --git a/sartd/src/kubernetes/src/agent/reconciler/bgp_advertisement.rs b/sartd/src/kubernetes/src/agent/reconciler/bgp_advertisement.rs
index f7ce1a6..b1e78e4 100644
--- a/sartd/src/kubernetes/src/agent/reconciler/bgp_advertisement.rs
+++ b/sartd/src/kubernetes/src/agent/reconciler/bgp_advertisement.rs
@@ -6,6 +6,7 @@ use kube::{
     runtime::{controller::Action, watcher::Config, Controller},
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     agent::{bgp::speaker, error::Error},
@@ -20,7 +21,7 @@ use crate::{
 
 use super::node_bgp::{DEFAULT_SPEAKER_TIMEOUT, ENV_HOSTNAME};
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 pub async fn run(state: State, interval: u64) {
     let client = Client::try_default()
         .await
         .expect
@@ -59,12 +60,6 @@ pub async fn reconciler(ba: Arc<BGPAdvertisement>, ctx: Arc<Context>) -> Result<
 
     let bgp_advertisements = Api::<BGPAdvertisement>::namespaced(ctx.client.clone(), &ns);
 
-    tracing::info!(
-        name = ba.name_any(),
-        namespace = ns,
-        "Reconcile BGPAdvertisement"
-    );
-
     reconcile(&bgp_advertisements, &ba, ctx).await
 }
 
@@ -74,6 +69,9 @@ async fn reconcile(
     ba: &BGPAdvertisement,
     ctx: Arc<Context>,
 ) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let node_bgps = Api::<NodeBGP>::all(ctx.client.clone());
 
     let node_name = std::env::var(ENV_HOSTNAME).map_err(Error::Var)?;
@@ -134,7 +132,6 @@ async fn reconcile(
                         })
                         .await
                         .map_err(Error::GotgPRC)?;
-                    tracing::info!(name = ba.name_any(), namespace = ba.namespace(), status=?adv_status, response=?res,"Add path response");
                     *adv_status = AdvertiseStatus::Advertised;
                     need_update = true;
 
@@ -148,7 +145,6 @@ async fn reconcile(
                         })
                         .await
                         .map_err(Error::GotgPRC)?;
-                    tracing::info!(name = ba.name_any(), namespace = ba.namespace(), status=?adv_status ,response=?res,"Add path response");
                 }
                 AdvertiseStatus::Withdraw => {
                     let res = speaker_client
@@ -158,7 +154,6 @@ async fn reconcile(
                         })
                         .await
                         .map_err(Error::GotgPRC)?;
-                    tracing::info!(name = ba.name_any(), namespace = ba.namespace(), status=?adv_status, response=?res,"Delete path response");
                     peers.remove(&p.name);
                     need_update = true;
 
@@ -180,7 +175,7 @@ async fn reconcile(
         tracing::info!(
             name = ba.name_any(),
             namespace = ba.namespace(),
-            "Update BGPAdvertisement"
+            "update BGPAdvertisement"
         );
         return Ok(Action::requeue(Duration::from_secs(60)));
     }
diff --git a/sartd/src/kubernetes/src/agent/reconciler/bgp_peer.rs b/sartd/src/kubernetes/src/agent/reconciler/bgp_peer.rs
index f3f3a39..4144133 100644
--- a/sartd/src/kubernetes/src/agent/reconciler/bgp_peer.rs
+++ b/sartd/src/kubernetes/src/agent/reconciler/bgp_peer.rs
@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
 use futures::StreamExt;
 use k8s_openapi::api::discovery::v1::EndpointSlice;
 use kube::{
-    api::{ListParams, Patch, PatchParams, PostParams},
+    api::{ListParams, PostParams},
     runtime::{
         controller::Action,
         finalizer::{finalizer, Event},
@@ -12,6 +12,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     agent::{bgp::speaker, error::Error},
@@ -46,7 +47,8 @@ pub async fn reconciler(bp: Arc<BGPPeer>, ctx: Arc<Context>) -> Result<Action, E
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = bp.name_any(), "Reconcile BGPPeer");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     let timeout = match bp.spec.speaker.timeout {
         Some(t) => t,
@@ -64,7 +66,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
     if info.asn == 0 {
         tracing::warn!(
             node_bgp = bp.spec.node_bgp_ref,
-            "Local BGP speaker is not configured"
+            "local BGP speaker is not configured"
         );
         return Ok(Action::requeue(Duration::from_secs(1)));
     }
@@ -89,7 +91,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
             tracing::info!(
                 asn = bp.spec.asn,
                 addr = bp.spec.addr,
-                "Peer already exists"
+                "peer already exists"
             );
             // update status
             match new_bp.status.as_mut() {
@@ -107,7 +109,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
                                 addr = bp.spec.addr,
                                 old_state = ?cond.status,
                                 new_state = ?new_state,
-                                "Peer state is changed"
+                                "peer state is changed"
                             );
                             conditions.push(BGPPeerCondition {
                                 status: BGPPeerConditionStatus::try_from(status as i32)
@@ -143,7 +145,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
                         asn = bp.spec.asn,
                         addr = bp.spec.addr,
                         state = ?state,
-                        "Peer state is initialized"
+                        "peer state is initialized"
                     );
                     status.conditions = Some(vec![BGPPeerCondition {
                         status: state,
@@ -160,7 +162,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
                     asn = bp.spec.asn,
                     addr = bp.spec.addr,
                     state = ?state,
-                    "Peer state is initialized"
+                    "peer state is initialized"
                 );
                 new_bp.status = Some(BGPPeerStatus {
                     backoff: 0,
@@ -185,7 +187,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
             name = bp.name_any(),
             asn = bp.spec.asn,
             addr = bp.spec.addr,
-            "Update BGPPeer status"
+            "update BGPPeer status"
         );
         if let Err(e) = api
             .replace_status(
@@ -209,7 +211,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
         tracing::info!(
             asn = bp.spec.asn,
             addr = bp.spec.addr,
-            "Peer doesn't exist yet"
+            "peer doesn't exist yet"
         );
 
         speaker_client
@@ -269,7 +271,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
             name = bp.name_any(),
             asn = bp.spec.asn,
             addr = bp.spec.addr,
-            "Reflect the newly established peer to existing BGPAdvertisements"
+            "reflect the newly established peer to existing BGPAdvertisements"
         );
         let eps_api = Api::<EndpointSlice>::all(ctx.client.clone());
         let mut eps_list = eps_api
@@ -296,7 +298,7 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
             name = bp.name_any(),
             asn = bp.spec.asn,
             addr = bp.spec.addr,
-            "Reset BGPAdvertisements"
+            "reset BGPAdvertisements"
        );
         let ba_api = Api::<BGPAdvertisement>::all(ctx.client.clone());
         let mut ba_list = ba_api
@@ -334,7 +336,8 @@ async fn reconcile(api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Resul
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(_api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = bp.name_any(), "Cleanup BGPPeer");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     let timeout = match bp.spec.speaker.timeout {
         Some(t) => t,
@@ -351,7 +354,7 @@ async fn cleanup(_api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Result
         .await
     {
         Ok(_peer) => {
-            tracing::info!(name = bp.name_any(), addr = bp.spec.addr, "Delete peer");
+            tracing::info!(name = bp.name_any(), addr = bp.spec.addr, "delete peer");
             speaker_client
                 .delete_peer(sartd_proto::sart::DeletePeerRequest {
                     addr: bp.spec.addr.clone(),
@@ -386,14 +389,13 @@ async fn cleanup(_api: &Api<BGPPeer>, bp: &BGPPeer, ctx: Arc<Context>) -> Result
             name = bp.name_any(),
             asn = bp.spec.asn,
             addr = bp.spec.addr,
-            "Clean up the peer from BGPAdvertisements"
+            "clean up the peer from BGPAdvertisements"
         );
         let ba_api = Api::<BGPAdvertisement>::all(ctx.client.clone());
         let mut ba_list = ba_api
             .list(&ListParams::default())
             .await
             .map_err(Error::Kube)?;
-        tracing::warn!(name = bp.name_any(), "Reach here");
         for ba in ba_list.iter_mut() {
             if let Some(status) = ba.status.as_mut() {
                 if let Some(peers) = status.peers.as_mut() {
diff --git a/sartd/src/kubernetes/src/agent/reconciler/node_bgp.rs b/sartd/src/kubernetes/src/agent/reconciler/node_bgp.rs
index 6f79c78..f325f9e 100644
--- a/sartd/src/kubernetes/src/agent/reconciler/node_bgp.rs
+++ b/sartd/src/kubernetes/src/agent/reconciler/node_bgp.rs
@@ -11,6 +11,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     agent::{bgp::speaker, error::Error},
@@ -43,9 +44,10 @@ pub async fn reconciler(nb: Arc<NodeBGP>, ctx: Arc<Context>) -> Result<Action, E
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = nb.name_any(), "Reconcile NodeBGP");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     // NodeBGP.spec.asn and routerId should be immutable
@@ -81,7 +83,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
                 name = nb.name_any(),
                 asn = nb.spec.asn,
                 router_id = nb.spec.router_id,
-                "Backoff BGP advertisement"
+                "backoff BGP advertisement"
             );
             backoff_advertisements(nb, &ctx.client.clone()).await?;
             return Err(e);
@@ -107,14 +109,14 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resu
                     name = nb.name_any(),
                     asn = nb.spec.asn,
                     router_id = nb.spec.router_id,
-                    "Backoff NodeBGP"
+                    "backoff NodeBGP"
                 );
                 backoff_advertisements(nb, &ctx.client.clone()).await?;
                 tracing::warn!(
                     name = nb.name_any(),
                     asn = nb.spec.asn,
                     router_id = nb.spec.router_id,
-                    "Backoff BGP advertisement"
+                    "backoff BGP advertisement"
                 );
             }
             return Err(Error::GotgPRC(e));
@@ -129,7 +131,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
             name = nb.name_any(),
             asn = nb.spec.asn,
             router_id = nb.spec.router_id,
-            "Configure local BGP settings"
+            "configure local BGP settings"
         );
         speaker_client
             .set_as(sartd_proto::sart::SetAsRequest { asn: nb.spec.asn })
@@ -175,7 +177,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
             name = nb.name_any(),
             asn = nb.spec.asn,
             router_id = nb.spec.router_id,
-            "Update NodeBGP status"
+            "update NodeBGP status"
         );
         // update status
         let mut new_nb = nb.clone();
@@ -216,7 +218,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
                     tracing::info!(
                         name = nb.name_any(),
                         peer = bp.name_any(),
-                        "Increment peer's backoff count"
+                        "increment peer's backoff count"
                     );
                     bgp_peer_api
                         .replace_status(
@@ -241,7 +243,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
             name = nb.name_any(),
             asn = nb.spec.asn,
             router_id = nb.spec.router_id,
-            "Local BGP settings are already configured"
+            "local BGP settings are already configured"
         );
 
         // patch status
@@ -251,7 +253,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
                 reason: NodeBGPConditionReason::Configured,
             }
         } else {
-            tracing::warn!("Local BGP speaker configuration and NodeBGP are mismatched");
+            tracing::warn!("local BGP speaker configuration and NodeBGP are mismatched");
             NodeBGPCondition {
                 status: NodeBGPConditionStatus::Unavailable,
                 reason: NodeBGPConditionReason::InvalidConfiguration,
@@ -290,7 +292,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
             name = nb.name_any(),
             asn = nb.spec.asn,
             router_id = nb.spec.router_id,
-            "Update NodeBGP status"
+            "update NodeBGP status"
         );
         // update status
         new_nb.status = Some(new_status);
@@ -308,12 +310,6 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
 
     // create peers based on NodeBGP.spec.peers if available
     {
-        let cluster_bgps = nb
-            .status
-            .as_ref()
-            .and_then(|status| status.cluster_bgp_refs.as_ref())
-            .unwrap_or(&Vec::new());
-
         let bgp_peers = Api::<BGPPeer>::all(ctx.client.clone());
 
         if let Some(peers) = new_nb.spec.peers.as_mut() {
@@ -348,7 +344,7 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
             }
             if !errors.is_empty() {
                 for e in errors.iter() {
-                    tracing::error!(error=?e, "Failed to reconcile BGPPeer associated with NodeBGP");
+                    tracing::error!(error=?e, "failed to reconcile BGPPeer associated with NodeBGP");
                 }
                 // returns ok but, this should retry to reconcile
                 return Ok(Action::requeue(Duration::from_secs(10)));
@@ -359,9 +355,10 @@ async fn reconcile(api: &Api<NodeBGP>, nb: &NodeBGP, ctx: Arc<Context>) -> Resul
     Ok(Action::requeue(Duration::from_secs(60)))
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(_api: &Api<NodeBGP>, nb: &NodeBGP, _ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = nb.name_any(), "Cleanup NodeBGP");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     let timeout = nb.spec.speaker.timeout.unwrap_or(DEFAULT_SPEAKER_TIMEOUT);
 
     let mut speaker_client =
diff --git a/sartd/src/kubernetes/src/controller/reconciler/address_block.rs b/sartd/src/kubernetes/src/controller/reconciler/address_block.rs
index 252d43c..ea6fc0a 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/address_block.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/address_block.rs
@@ -13,6 +13,7 @@ use kube::{
 };
 
 use sartd_ipam::manager::{AllocatorSet, Block};
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, ContextWith, Ctx, State},
@@ -42,13 +43,14 @@ pub async fn reconciler(
     .map_err(|e| Error::Finalizer(Box::new(e)))
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(
     _api: &Api<AddressBlock>,
     ab: &AddressBlock,
     ctx: Arc<ContextWith<Arc<AllocatorSet>>>,
 ) -> Result<Action, Error> {
-    tracing::info!(name = ab.name_any(), "reconcile AddressBlock");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     let component = ctx.component.clone();
     let mut alloc_set = component.inner.lock().map_err(|_| Error::FailedToGetLock)?;
@@ -60,7 +62,7 @@ async fn reconcile(
 
     match alloc_set.blocks.get(&ab.name_any()) {
         Some(_a) => {
-            tracing::info!(name = ab.name_any(), "Address block already exists");
+            tracing::info!(name = ab.name_any(), "address block already exists");
             match ab.spec.auto_assign {
                 true => {
                     // Check if already set
@@ -69,21 +71,21 @@ async fn reconcile(
                         if ab.name_any().ne(name) {
                             tracing::warn!(
                                 name = ab.name_any(),
-                                "Auto assignable block already exists."
+                                "auto assignable block already exists."
                             );
                             return Err(Error::AutoAssignMustBeOne);
                         }
                     }
                     None => {
                         alloc_set.auto_assign = Some(ab.name_any());
-                        tracing::info!(name = ab.name_any(), "Enable auto assign.");
+                        tracing::info!(name = ab.name_any(), "enable auto assign.");
                     }
                 }
             }
             false => {
                 if let Some(name) = &alloc_set.auto_assign {
                     if ab.name_any().eq(name) {
-                        tracing::info!(name = ab.name_any(), "Disable auto assign.");
+                        tracing::info!(name = ab.name_any(), "disable auto assign.");
                         alloc_set.auto_assign = None;
                     }
                 }
@@ -92,7 +94,7 @@ async fn reconcile(
             if let Some(auto_assign_name) = &alloc_set.auto_assign {
                 // If disable auto assign
                 if !ab.spec.auto_assign && auto_assign_name.eq(&ab.name_any()) {
-                    tracing::info!(name = ab.name_any(), "Disable auto assign");
+                    tracing::info!(name = ab.name_any(), "disable auto assign");
                 }
             }
         }
@@ -102,7 +104,7 @@ async fn reconcile(
             if ab.spec.auto_assign {
                 match &alloc_set.auto_assign {
                     Some(_a) => {
-                        tracing::warn!(name = ab.name_any(), "Cannot override auto assign.");
+                        tracing::warn!(name = ab.name_any(), "cannot override auto assign.");
                         return Err(Error::FailedToEnableAutoAssign);
                     }
                     None => {
@@ -116,13 +118,14 @@ async fn reconcile(
     Ok(Action::await_change())
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(
     _api: &Api<AddressBlock>,
     ab: &AddressBlock,
     ctx: Arc<ContextWith<Arc<AllocatorSet>>>,
 ) -> Result<Action, Error> {
-    tracing::info!(name = ab.name_any(), "clean up AddressBlock");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     let component = ctx.component.clone();
     let mut alloc_set = component.inner.lock().map_err(|_| Error::FailedToGetLock)?;
diff --git a/sartd/src/kubernetes/src/controller/reconciler/address_pool.rs b/sartd/src/kubernetes/src/controller/reconciler/address_pool.rs
index 2479d32..a8c27c6 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/address_pool.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/address_pool.rs
@@ -11,6 +11,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, Context, State},
@@ -36,20 +37,21 @@ pub async fn reconciler(ap: Arc<AddressPool>, ctx: Arc<Context>) -> Result<Actio
     .map_err(|e| Error::Finalizer(Box::new(e)))
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(
     api: &Api<AddressPool>,
     ap: &AddressPool,
     ctx: Arc<Context>,
 ) -> Result<Action, Error> {
-    tracing::info!(name = ap.name_any(), "reconcile AddressPool");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     match ap.spec.r#type {
         AddressType::Service => reconcile_service_pool(api, ap, ctx).await,
     }
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile_service_pool(
     api: &Api<AddressPool>,
     ap: &AddressPool,
@@ -63,7 +65,7 @@ async fn reconcile_service_pool(
         .map_err(Error::Kube)?
     {
         Some(ab) => {
-            tracing::warn!(name = ab.name_any(), "AddressBlock already exists");
+            tracing::warn!(name = ab.name_any(), "address block already exists");
         }
         None => {
             let ab = AddressBlock {
@@ -123,7 +125,7 @@ async fn reconcile_service_pool(
     Ok(Action::await_change())
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(
     _api: &Api<AddressPool>,
     _ap: &AddressPool,
diff --git a/sartd/src/kubernetes/src/controller/reconciler/bgp_advertisement.rs b/sartd/src/kubernetes/src/controller/reconciler/bgp_advertisement.rs
index d152e0e..84fbb35 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/bgp_advertisement.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/bgp_advertisement.rs
@@ -11,6 +11,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, Context, State},
@@ -45,13 +46,6 @@ async fn reconcile(
     ba: &BGPAdvertisement,
     _ctx: Arc<Context>,
 ) -> Result<Action, Error> {
-    let ns = get_namespace::<BGPAdvertisement>(ba).map_err(Error::KubeLibrary)?;
-    tracing::info!(
-        name = ba.name_any(),
-        namespace = ns,
-        "Reconcile BGPAdvertisement"
-    );
-
     Ok(Action::await_change())
 }
 
@@ -61,12 +55,10 @@ async fn cleanup(
     ba: &BGPAdvertisement,
     _ctx: Arc<Context>,
 ) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let ns = get_namespace::<BGPAdvertisement>(ba).map_err(Error::KubeLibrary)?;
-    tracing::info!(
-        name = ba.name_any(),
-        namespace = ns,
-        "Cleanup BGPAdvertisement"
-    );
 
     let mut new_ba = ba.clone();
     let mut need_update = false;
@@ -79,7 +71,7 @@ async fn cleanup(
                 tracing::info!(
                     name = ba.name_any(),
                     namespace = ns,
-                    "Successfully delete BGPAdvertisement"
+                    "successfully delete BGPAdvertisement"
                 );
                 return Ok(Action::await_change());
             }
@@ -93,7 +85,7 @@ async fn cleanup(
                 tracing::info!(
                     name = ba.name_any(),
                     namespace = ns,
-                    "Successfully delete BGPAdvertisement"
+                    "successfully delete BGPAdvertisement"
                 );
                 return Ok(Action::await_change());
             }
@@ -110,7 +102,7 @@ async fn cleanup(
     tracing::info!(
         name = &ba.name_any(),
         namespace = ns,
-        "Submit withdraw request"
+        "submit withdraw request"
     );
 
 diff --git a/sartd/src/kubernetes/src/controller/reconciler/cluster_bgp.rs b/sartd/src/kubernetes/src/controller/reconciler/cluster_bgp.rs
index ac37605..79ace2f 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/cluster_bgp.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/cluster_bgp.rs
@@ -12,6 +12,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, Context, State},
@@ -43,8 +44,11 @@ pub async fn reconciler(cb: Arc<ClusterBGP>, ctx: Arc<Context>) -> Result<Action
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     tracing::info!(name = cb.name_any(), "reconcile ClusterBGP");
 
     let mut need_requeue = false;
@@ -64,16 +68,12 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
         None => Vec::new(),
     };
 
-    tracing::info!(name = cb.name_any(), actual=?actual_nodes, "actual nodes");
-
     let matched_nodes = nodes.list(&list_params).await.map_err(Error::Kube)?;
     let matched_node_names = matched_nodes
         .iter()
         .map(|n| n.name_any())
         .collect::<Vec<String>>();
 
-    tracing::info!(name = cb.name_any(), matched=?matched_node_names, "matched nodes");
-
     let (added, remain, removed) = get_diff(&actual_nodes, &matched_node_names);
 
     let node_bgps = Api::<NodeBGP>::all(ctx.client.clone());
@@ -121,7 +121,6 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
                 let peer_templ_api = Api::<BGPPeerTemplate>::all(ctx.client.clone());
                 let peers = get_peers(cb, &new_nb, &peer_templ_api).await?;
 
-                tracing::info!(nb=nb.name_any(), label=?new_nb.labels(),"NodeBGP label");
                 match new_nb.spec.peers.as_mut() {
                     Some(nb_peers) => {
                         for peer in peers.iter() {
@@ -137,14 +136,14 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
                     }
                 }
                 if need_spec_update {
-                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"Update existing NodeBGP's spec.peers");
+                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"update existing NodeBGP's spec.peers");
                     node_bgps
                         .replace(&nb.name_any(), &PostParams::default(), &new_nb)
                         .await
                         .map_err(Error::Kube)?;
                 }
                 if need_status_update {
-                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"Update existing NodeBGP's status");
+                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"update existing NodeBGP's status");
                     node_bgps
                         .replace_status(
                             &nb.name_any(),
@@ -180,7 +179,7 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
 
                 nb.spec.peers = Some(peers);
 
-                tracing::info!(node_bgp=node.name_any(),asn=asn,router_id=?router_id,"Create new NodeBGP resource");
+                tracing::info!(node_bgp=node.name_any(),asn=asn,router_id=?router_id,"create new NodeBGP resource");
                 node_bgps
                     .create(&PostParams::default(), &nb)
                     .await
@@ -219,7 +218,7 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
                 }
 
                 if need_spec_update {
-                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"Update existing NodeBGP's spec.peers");
+                    tracing::info!(node_bgp=nb.name_any(),asn=nb.spec.asn,router_id=?nb.spec.router_id,"update existing NodeBGP's spec.peers");
                     node_bgps
                         .replace(&nb.name_any(), &PostParams::default(), &new_nb)
                         .await
@@ -260,7 +259,6 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
         }
     };
 
-    tracing::info!(name = cb.name_any(), "Update ClusterBGP Status");
     cluster_bgp_api
         .replace_status(
             &cb.name_any(),
@@ -279,10 +277,8 @@ async fn reconcile(cb: &ClusterBGP, ctx: Arc<Context>) -> Result<Action, Error>
 }
 
 // cleanup() is called when a resource is deleted
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(cb: &ClusterBGP, _ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = cb.name_any(), "clean up ClusterBGP");
-
     Ok(Action::await_change())
 }
diff --git a/sartd/src/kubernetes/src/controller/reconciler/endpointslice_watcher.rs b/sartd/src/kubernetes/src/controller/reconciler/endpointslice_watcher.rs
index b834228..411b5e9 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/endpointslice_watcher.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/endpointslice_watcher.rs
@@ -19,6 +19,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, Context, Ctx, State},
@@ -66,11 +67,14 @@ pub async fn reconciler(eps: Arc<EndpointSlice>, ctx: Arc<Context>) -> Result<Ac
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(eps: &EndpointSlice, ctx: Arc<Context>) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let ns = get_namespace::<EndpointSlice>(eps).map_err(Error::KubeLibrary)?;
     tracing::info!(
         name = eps.name_any(),
         namespace = ns,
-        "Reconcile Endpointslice"
+        "reconcile Endpointslice"
     );
 
     let svc_name = match get_svc_name_from_eps(eps) {
@@ -83,7 +87,7 @@ async fn reconcile(eps: &EndpointSlice, ctx: Arc<Context>) -> Result {
             tracing::warn!(
                 name = svc_name,
-                "The Service resource associated with EndpointSlice is not found"
+                "the Service resource associated with EndpointSlice is not found"
             );
             return Ok(Action::await_change());
         }
@@ -99,7 +103,7 @@ async fn reconcile(eps: &EndpointSlice, ctx: Arc<Context>) -> Result = BTreeMap::new();
@@ -220,7 +224,7 @@ async fn reconcile(eps: &EndpointSlice, ctx: Arc<Context>) -> Result,
                 }
             }
             let mut need_update = true;
@@ -243,7 +247,7 @@ async fn reconcile(eps: &EndpointSlice, ctx: Arc<Context>) -> Result
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(eps: &EndpointSlice, _ctx: Arc<Context>) -> Result<Action, Error> {
     let ns = get_namespace::<EndpointSlice>(eps).map_err(Error::KubeLibrary)?;
-    tracing::info!(
-        name = eps.name_any(),
-        namespace = ns,
-        "Cleanup Endpointslice"
-    );
-
     Ok(Action::await_change())
 }
diff --git a/sartd/src/kubernetes/src/controller/reconciler/node_watcher.rs b/sartd/src/kubernetes/src/controller/reconciler/node_watcher.rs
index 00e9fb7..56bfca7 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/node_watcher.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/node_watcher.rs
@@ -12,6 +12,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, Context, State},
@@ -39,7 +40,8 @@ pub async fn reconciler(node: Arc<Node>, ctx: Arc<Context>) -> Result<Action, Er
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn reconcile(_api: &Api<Node>, node: &Node, ctx: Arc<Context>) -> Result<Action, Error> {
-    tracing::info!(name = node.name_any(), "Reconcile Node");
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
 
     // sync node labels
     let node_bgp_api = Api::<NodeBGP>::all(ctx.client.clone());
@@ -71,7 +73,7 @@ async fn reconcile(_api: &Api<Node>, node: &Node, ctx: Arc<Context>) -> Result
 
 #[tracing::instrument(skip_all, fields(trace_id))]
 async fn cleanup(_api: &Api<Node>, node: &Node, ctx: Arc<Context>) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let node_bgp_api = Api::<NodeBGP>::all(ctx.client.clone());
 
+    tracing::info!(node = node.name_any(), "delete the NodeBGP");
+
     node_bgp_api
         .delete(&node.name_any(), &DeleteParams::default())
         .await
diff --git a/sartd/src/kubernetes/src/controller/reconciler/service_watcher.rs b/sartd/src/kubernetes/src/controller/reconciler/service_watcher.rs
index 62d62dc..b56510e 100644
--- a/sartd/src/kubernetes/src/controller/reconciler/service_watcher.rs
+++ b/sartd/src/kubernetes/src/controller/reconciler/service_watcher.rs
@@ -15,6 +15,7 @@ use kube::{
     },
     Api, Client, ResourceExt,
 };
+use tracing::{field, Span};
 
 use crate::{
     context::{error_policy, ContextWith, Ctx, State},
@@ -57,9 +58,11 @@ async fn reconcile(
     svc: &Service,
     ctx: Arc<ContextWith<Arc<AllocatorSet>>>,
 ) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let ns = get_namespace::<Service>(svc).map_err(Error::KubeLibrary)?;
-    tracing::info!(name = svc.name_any(), namespace = ns, "Reconcile Service");
+    tracing::info!(name = svc.name_any(), namespace = ns, "reconcile Service");
 
     if !is_loadbalancer(svc) {
         // If Service is not LoadBalancer and it has allocated load balancer addresses, release these.
@@ -70,7 +73,6 @@ async fn reconcile(
         // we check its annotation.
         // And if it has, we have to release its addresses.
         let releasable_addrs = get_releasable_addrs(svc);
-        tracing::info!(name = svc.name_any(), namespace = ns, releasable=?releasable_addrs, "not LoadBalancer");
         if let Some(addrs) = releasable_addrs {
             let released = {
                 let c = ctx.component.clone();
@@ -219,7 +221,11 @@ async fn reconcile(
     if actual_addrs.eq(&remained) {
         return Ok(Action::await_change());
     }
-    tracing::info!(name = svc.name_any(), namespace = ns, remained=?remained, released=?removed, "Update allocation.");
+    tracing::info!(
+        name = svc.name_any(),
+        namespace = ns,
+        "update the allocation"
+    );
     let new_svc = update_svc_lb_addrs(svc, &remained);
 
     api.replace_status(
@@ -234,7 +240,7 @@ async fn reconcile(
         name = svc.name_any(),
         namespace = ns,
         lb_addrs=?remained,
-        "Update service status by the allocation lb address"
+        "update service status by the allocation lb address"
     );
 
     let new_allocated_addrs = get_allocated_lb_addrs(&new_svc)
@@ -276,8 +282,11 @@ async fn cleanup(
     svc: &Service,
     ctx: Arc<ContextWith<Arc<AllocatorSet>>>,
 ) -> Result<Action, Error> {
+    let trace_id = sartd_trace::telemetry::get_trace_id();
+    Span::current().record("trace_id", &field::display(&trace_id));
+
     let ns = get_namespace::<Service>(svc).map_err(Error::KubeLibrary)?;
-    tracing::info!(name = svc.name_any(), namespace = ns, "Cleanup Service");
+    tracing::info!(name = svc.name_any(), namespace = ns, "cleanup Service");
 
     let allocated_addrs = match get_allocated_lb_addrs(svc) {
         Some(a) => a,
@@ -303,7 +312,7 @@ async fn cleanup(
         name = svc.name_any(),
         namespace = ns,
         lb_addrs=?released,
-        "Update service status by the release lb address"
+        "update service status by the release lb address"
     );
 
     Ok(Action::await_change())
@@ -316,7 +325,7 @@ pub async fn run(state: State, interval: u64, allocator_set: Arc<AllocatorSet>)
 
     let services = Api::<Service>::all(client.clone());
 
-    tracing::info!("Start Service watcher");
+    tracing::info!("start Service watcher");
 
     Controller::new(services, Config::default().any_semantic())
         .shutdown_on_signal()
@@ -514,7 +523,7 @@ fn release_lb_addrs(
                 name = svc.name_any(),
                 namespace = ns,
                 desired_pool = pool,
-                "Desired AddressBlock doesn't exist"
+                "desired AddressBlock doesn't exist"
             );
             continue;
         }
@@ -531,7 +540,7 @@ fn release_lb_addrs(
                     namespace = ns,
                     pool = block.pool_name,
                     address = a.to_string(),
-                    "Relaese address from address pool"
+                    "release the address from address pool"
                 )
             }
             Err(e) => {
@@ -615,25 +624,6 @@ fn clear_svc_lb_addrs(svc: &Service, released: &[IpAddr]) -> Service {
     new_svc
 }
 
-// fn get_diff(prev: &[String], now: &[String]) -> (Vec<String>, Vec<String>, Vec<String>) {
-//     let removed = prev
-//         .iter()
-//         .filter(|p| !now.contains(p))
-//         .cloned()
-//         .collect::<Vec<String>>();
-//     let added = now
-//         .iter()
-//         .filter(|n| !prev.contains(n) && !removed.contains(n))
-//         .cloned()
-//         .collect::<Vec<String>>();
-//     let shared = prev
-//         .iter()
-//         .filter(|p| now.contains(p))
-//         .cloned()
-//         .collect::<Vec<String>>();
-//     (added, shared, removed)
-// }
-
 fn merge_marked_allocation(
     actual_allocation: HashMap>,
     desired_allocation: HashMap>,
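
The refactor applies one pattern across every reconciler: drop the ad-hoc per-step `tracing::info!` calls, switch the remaining messages to lowercase with structured key-value fields, and record a trace ID on the reconciler's span so all events inside one reconciliation can be correlated. A minimal, self-contained sketch of that pattern is below. The `get_trace_id()` function here is a hypothetical stand-in for `sartd_trace::telemetry::get_trace_id()`, and the `tracing`, `tracing-subscriber`, and `tokio` dependencies are assumed.

use tracing::{field, Span};

// Hypothetical stand-in for sartd_trace::telemetry::get_trace_id(),
// which in sartd resolves the current trace ID from the telemetry layer.
fn get_trace_id() -> String {
    "00000000000000000000000000000000".to_string()
}

// `fields(trace_id)` declares an empty field on the span up front;
// a span can only record fields that were declared when it was created,
// which is why the patch adds `fields(trace_id)` to each instrument attribute.
#[tracing::instrument(skip_all, fields(trace_id))]
async fn reconcile(name: &str) {
    // Fill in the declared field once the value is known.
    let trace_id = get_trace_id();
    Span::current().record("trace_id", &field::display(&trace_id));

    // Structured key-value fields plus a short lowercase message,
    // matching the style the patch converts the reconcilers to.
    tracing::info!(name = name, "reconcile resource");
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();
    reconcile("example").await;
}

With a subscriber installed, the recorded `trace_id` is attached to every event emitted inside the span. That is what makes deleting the chatty "Add path response"-style logs safe: the events that remain still carry enough span context to follow a single reconciliation end to end.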