diff --git a/tracing-core/Cargo.toml b/tracing-core/Cargo.toml
index 101919417f..b089cca3a2 100644
--- a/tracing-core/Cargo.toml
+++ b/tracing-core/Cargo.toml
@@ -8,7 +8,7 @@ name = "tracing-core"
 # - README.md
 # - Update CHANGELOG.md.
 # - Create "v0.1.x" git tag.
-version = "0.1.7"
+version = "0.1.8"
 authors = ["Tokio Contributors <team@tokio.rs>"]
 license = "MIT"
 readme = "README.md"
@@ -27,16 +27,11 @@ edition = "2018"
 
 [features]
 default = ["std"]
-std = []
+std = ["lazy_static"]
 
 [badges]
 azure-devops = { project = "tracing/tracing", pipeline = "tokio-rs.tracing", build = "1" }
 maintenance = { status = "actively-developed" }
 
 [dependencies]
-lazy_static = "1"
-
-[target.'cfg(not(feature = "std"))'.dependencies]
-spin = "0.5"
-lazy_static = { version = "1", features = ["spin_no_std"] }
-
+lazy_static = { version = "1", optional = true }
diff --git a/tracing-core/src/lazy_static/LICENSE b/tracing-core/src/lazy_static/LICENSE
new file mode 100644
index 0000000000..28e478827c
--- /dev/null
+++ b/tracing-core/src/lazy_static/LICENSE
@@ -0,0 +1,26 @@
+
+Copyright (c) 2010 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/tracing-core/src/lazy_static/core_lazy.rs b/tracing-core/src/lazy_static/core_lazy.rs
new file mode 100644
index 0000000000..56cfe24ce3
--- /dev/null
+++ b/tracing-core/src/lazy_static/core_lazy.rs
@@ -0,0 +1,30 @@
+// Copyright 2016 lazy-static.rs Developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::Once;
+
+pub(crate) struct Lazy<T: Sync>(Once<T>);
+
+impl<T: Sync> Lazy<T> {
+    pub(crate) const INIT: Self = Lazy(Once::INIT);
+
+    #[inline(always)]
+    pub(crate) fn get<F>(&'static self, builder: F) -> &T
+    where
+        F: FnOnce() -> T,
+    {
+        self.0.call_once(builder)
+    }
+}
+
+#[macro_export]
+#[doc(hidden)]
+macro_rules! __lazy_static_create {
+    ($NAME:ident, $T:ty) => {
+        static $NAME: $crate::lazy_static::lazy::Lazy<$T> = $crate::lazy_static::lazy::Lazy::INIT;
+    };
+}
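A note on the `&'static self` receiver above: `get` can only be invoked on a `static` item, which is what lets it hand back a plain `&T` that lives forever. A minimal sketch of the call shape (the static and its contents are invented for illustration, not part of this diff):

```rust
// Hypothetical call site inside tracing-core (`Lazy` is crate-private):
static GREETING: Lazy<&'static str> = Lazy::INIT;

fn greeting() -> &'static str {
    // Compiles only because `GREETING` is a `static`: `get` demands
    // `&'static self`, and the first call runs the builder exactly once.
    *GREETING.get(|| "hello")
}
```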
diff --git a/tracing-core/src/lazy_static/mod.rs b/tracing-core/src/lazy_static/mod.rs
new file mode 100644
index 0000000000..137ed1b01f
--- /dev/null
+++ b/tracing-core/src/lazy_static/mod.rs
@@ -0,0 +1,89 @@
+// Copyright 2016 lazy-static.rs Developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+/*!
+A macro for declaring lazily evaluated statics.
+Using this macro, it is possible to have `static`s that require code to be
+executed at runtime in order to be initialized.
+This includes anything requiring heap allocations, like vectors or hash maps,
+as well as anything that requires function calls to be computed.
+*/
+
+#[path = "core_lazy.rs"]
+pub(crate) mod lazy;
+
+#[doc(hidden)]
+pub(crate) use core::ops::Deref as __Deref;
+
+#[macro_export(local_inner_macros)]
+#[doc(hidden)]
+macro_rules! __lazy_static_internal {
+    // optional visibility restrictions are wrapped in `()` to allow for
+    // explicitly passing otherwise implicit information about private items
+    ($(#[$attr:meta])* ($($vis:tt)*) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
+        __lazy_static_internal!(@MAKE TY, $(#[$attr])*, ($($vis)*), $N);
+        __lazy_static_internal!(@TAIL, $N : $T = $e);
+        lazy_static!($($t)*);
+    };
+    (@TAIL, $N:ident : $T:ty = $e:expr) => {
+        impl $crate::lazy_static::__Deref for $N {
+            type Target = $T;
+            fn deref(&self) -> &$T {
+                #[inline(always)]
+                fn __static_ref_initialize() -> $T { $e }
+
+                #[inline(always)]
+                fn __stability() -> &'static $T {
+                    __lazy_static_create!(LAZY, $T);
+                    LAZY.get(__static_ref_initialize)
+                }
+                __stability()
+            }
+        }
+        impl $crate::lazy_static::LazyStatic for $N {
+            fn initialize(lazy: &Self) {
+                let _ = &**lazy;
+            }
+        }
+    };
+    // `vis` is wrapped in `()` to prevent parsing ambiguity
+    (@MAKE TY, $(#[$attr:meta])*, ($($vis:tt)*), $N:ident) => {
+        #[allow(missing_copy_implementations)]
+        #[allow(non_camel_case_types)]
+        #[allow(dead_code)]
+        $(#[$attr])*
+        $($vis)* struct $N {__private_field: ()}
+        #[doc(hidden)]
+        $($vis)* static $N: $N = $N {__private_field: ()};
+    };
+    () => ()
+}
+
+#[macro_export(local_inner_macros)]
+/// lazy_static (suppress docs_missing warning)
macro_rules! lazy_static {
+    ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
+        // use `()` to explicitly forward the information about private items
+        __lazy_static_internal!($(#[$attr])* () static ref $N : $T = $e; $($t)*);
+    };
+    ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
+        __lazy_static_internal!($(#[$attr])* (pub) static ref $N : $T = $e; $($t)*);
+    };
+    ($(#[$attr:meta])* pub ($($vis:tt)+) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
+        __lazy_static_internal!($(#[$attr])* (pub ($($vis)+)) static ref $N : $T = $e; $($t)*);
+    };
+    () => ()
+}
+
+/// Support trait for enabling a few common operations on lazy static values.
+///
+/// This is implemented by each defined lazy static, and
+/// used by the free functions in this crate.
+pub(crate) trait LazyStatic {
+    #[doc(hidden)]
+    fn initialize(lazy: &Self);
+}
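For orientation, this is the call-side shape the vendored macro preserves; the static below is illustrative rather than something this diff adds. On `std` builds the same invocation resolves to the crates.io macro, so call sites inside tracing-core do not change:

```rust
lazy_static! {
    // Initialized on first dereference; backed by the spin-based `Once`
    // on no_std builds and by crates.io lazy_static on std builds.
    static ref MAX_CALLSITES: usize = compute_limit();
}

fn compute_limit() -> usize {
    64 * 1024
}

fn check(n: usize) -> bool {
    // Auto-deref goes through the generated `Deref` impl above.
    n < *MAX_CALLSITES
}
```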
diff --git a/tracing-core/src/lib.rs b/tracing-core/src/lib.rs
index c0f5e42755..a3bb882323 100644
--- a/tracing-core/src/lib.rs
+++ b/tracing-core/src/lib.rs
@@ -95,9 +95,6 @@
 #[cfg(not(feature = "std"))]
 extern crate alloc;
 
-#[macro_use]
-extern crate lazy_static;
-
 /// Statically constructs an [`Identifier`] for the provided [`Callsite`].
 ///
 /// This may be used in contexts, such as static initializers, where the
@@ -209,6 +206,27 @@ macro_rules! metadata {
     };
 }
 
+// std uses lazy_static from crates.io
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate lazy_static;
+
+// no_std uses vendored version of lazy_static 1.4.0 (4216696) with spin
+// This can conflict when included in a project already using std lazy_static
+// Remove this module when cargo enables specifying dependencies for no_std
+#[cfg(not(feature = "std"))]
+#[macro_use]
+mod lazy_static;
+
+// Trimmed-down vendored version of spin 0.5.2 (0387621)
+// Dependency of no_std lazy_static, not required in a std build
+#[cfg(not(feature = "std"))]
+pub(crate) mod spin;
+
+#[cfg(not(feature = "std"))]
+#[doc(hidden)]
+pub use self::spin::Once;
+
 pub mod callsite;
 pub mod dispatcher;
 pub mod event;
diff --git a/tracing-core/src/spin/LICENSE b/tracing-core/src/spin/LICENSE
new file mode 100644
index 0000000000..84d5f4d7af
--- /dev/null
+++ b/tracing-core/src/spin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathijs van de Nes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/tracing-core/src/spin/mod.rs b/tracing-core/src/spin/mod.rs
new file mode 100644
index 0000000000..767fcf649b
--- /dev/null
+++ b/tracing-core/src/spin/mod.rs
@@ -0,0 +1,7 @@
+//! Synchronization primitives based on spinning
+
+pub(crate) use mutex::*;
+pub use once::Once;
+
+mod mutex;
+mod once;
diff --git a/tracing-core/src/spin/mutex.rs b/tracing-core/src/spin/mutex.rs
new file mode 100644
index 0000000000..383a2cd839
--- /dev/null
+++ b/tracing-core/src/spin/mutex.rs
@@ -0,0 +1,109 @@
+use core::cell::UnsafeCell;
+use core::default::Default;
+use core::fmt;
+use core::marker::Sync;
+use core::ops::{Deref, DerefMut, Drop};
+use core::option::Option::{self, None, Some};
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering};
+
+/// This type provides MUTual EXclusion based on spinning.
+pub(crate) struct Mutex<T: ?Sized> {
+    lock: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// A guard to which the protected data can be accessed
+///
+/// When the guard falls out of scope it will release the lock.
+#[derive(Debug)]
+pub(crate) struct MutexGuard<'a, T: ?Sized> {
+    lock: &'a AtomicBool,
+    data: &'a mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+impl<T> Mutex<T> {
+    /// Creates a new spinlock wrapping the supplied data.
+    pub(crate) const fn new(user_data: T) -> Mutex<T> {
+        Mutex {
+            lock: AtomicBool::new(false),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+}
+
+impl<T: ?Sized> Mutex<T> {
+    fn obtain_lock(&self) {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
+            // Wait until the lock looks unlocked before retrying
+            while self.lock.load(Ordering::Relaxed) {
+                cpu_relax();
+            }
+        }
+    }
+
+    /// Locks the spinlock and returns a guard.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    pub(crate) fn lock(&self) -> MutexGuard<'_, T> {
+        self.obtain_lock();
+        MutexGuard {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+
+    /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
+    /// a guard within Some.
+    pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
+            Some(MutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Mutex<T> {
+    fn default() -> Mutex<T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
+    type Target = T;
+    fn deref<'b>(&'b self) -> &'b T {
+        &*self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut<'b>(&'b mut self) -> &'b mut T {
+        &mut *self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
+    /// The dropping of the MutexGuard will release the lock it was created from.
+    fn drop(&mut self) {
+        self.lock.store(false, Ordering::Release);
+    }
+}
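As a usage sketch (names invented; the real consumer is the `Mutex` wrapper in `stdlib.rs` further down), note that the lock is released by `MutexGuard`'s `Drop` impl rather than by an explicit unlock:

```rust
// `new` is a const fn, so a spin mutex can live in a static.
static COUNTER: crate::spin::Mutex<u64> = crate::spin::Mutex::new(0);

fn bump() -> u64 {
    // `lock` spins on the compare-and-swap until it acquires the flag.
    let mut guard = COUNTER.lock();
    *guard += 1;
    *guard
    // `guard` drops here, storing `false` with `Release` ordering.
}
```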
diff --git a/tracing-core/src/spin/once.rs b/tracing-core/src/spin/once.rs
new file mode 100644
index 0000000000..0bc47b566a
--- /dev/null
+++ b/tracing-core/src/spin/once.rs
@@ -0,0 +1,146 @@
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
+
+/// A synchronization primitive which can be used to run a one-time global
+/// initialization. Unlike its std equivalent, this is generalized so that the
+/// closure returns a value and it is stored. Once therefore acts something like
+/// a future, too.
+pub struct Once<T> {
+    state: AtomicUsize,
+    data: UnsafeCell<Option<T>>, // TODO remove option and use mem::uninitialized
+}
+
+impl<T: fmt::Debug> fmt::Debug for Once<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.r#try() {
+            Some(s) => write!(f, "Once {{ data: ")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ <uninitialized> }}"),
+        }
+    }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Send + Sync> Sync for Once<T> {}
+unsafe impl<T: Send> Send for Once<T> {}
+
+// Four states that a Once can be in, encoded into the lower bits of `state` in
+// the Once structure.
+const INCOMPLETE: usize = 0x0;
+const RUNNING: usize = 0x1;
+const COMPLETE: usize = 0x2;
+const PANICKED: usize = 0x3;
+
+use core::hint::unreachable_unchecked as unreachable;
+
+impl<T> Once<T> {
+    /// Initialization constant of `Once`.
+    pub const INIT: Self = Once {
+        state: AtomicUsize::new(INCOMPLETE),
+        data: UnsafeCell::new(None),
+    };
+
+    /// Creates a new `Once` value.
+    pub const fn new() -> Once<T> {
+        Self::INIT
+    }
+
+    fn force_get<'a>(&'a self) -> &'a T {
+        match unsafe { &*self.data.get() }.as_ref() {
+            None => unsafe { unreachable() },
+            Some(p) => p,
+        }
+    }
+
+    /// Performs an initialization routine once and only once. The given closure
+    /// will be executed if this is the first time `call_once` has been called,
+    /// and otherwise the routine will *not* be invoked.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns, it is guaranteed that some initialization
+    /// has run and completed (it may not be the closure specified). The
+    /// returned pointer will point to the result from the closure that was
+    /// run.
+    pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
+    where
+        F: FnOnce() -> T,
+    {
+        let mut status = self.state.load(Ordering::SeqCst);
+
+        if status == INCOMPLETE {
+            status = self
+                .state
+                .compare_and_swap(INCOMPLETE, RUNNING, Ordering::SeqCst);
+            if status == INCOMPLETE {
+                // We init
+                // We use a guard (Finish) to catch panics caused by builder
+                let mut finish = Finish {
+                    state: &self.state,
+                    panicked: true,
+                };
+                unsafe { *self.data.get() = Some(builder()) };
+                finish.panicked = false;
+
+                status = COMPLETE;
+                self.state.store(status, Ordering::SeqCst);
+
+                // This next line is strictly an optimization
+                return self.force_get();
+            }
+        }
+
+        loop {
+            match status {
+                INCOMPLETE => unreachable!(),
+                RUNNING => {
+                    // We spin
+                    cpu_relax();
+                    status = self.state.load(Ordering::SeqCst)
+                }
+                PANICKED => panic!("Once has panicked"),
+                COMPLETE => return self.force_get(),
+                _ => unsafe { unreachable() },
+            }
+        }
+    }
+
+    /// Returns a pointer iff the `Once` was previously initialized
+    pub fn r#try<'a>(&'a self) -> Option<&'a T> {
+        match self.state.load(Ordering::SeqCst) {
+            COMPLETE => Some(self.force_get()),
+            _ => None,
+        }
+    }
+
+    /// Like try, but will spin if the `Once` is in the process of being
+    /// initialized
+    pub fn wait<'a>(&'a self) -> Option<&'a T> {
+        loop {
+            match self.state.load(Ordering::SeqCst) {
+                INCOMPLETE => return None,
+                RUNNING => cpu_relax(), // We spin
+                COMPLETE => return Some(self.force_get()),
+                PANICKED => panic!("Once has panicked"),
+                _ => unsafe { unreachable() },
+            }
+        }
+    }
+}
+
+struct Finish<'a> {
+    state: &'a AtomicUsize,
+    panicked: bool,
+}
+
+impl<'a> Drop for Finish<'a> {
+    fn drop(&mut self) {
+        if self.panicked {
+            self.state.store(PANICKED, Ordering::SeqCst);
+        }
+    }
+}
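The state machine above (INCOMPLETE → RUNNING → COMPLETE, with PANICKED as a dead end) is what makes `call_once` usable as a one-shot value cell. A hedged sketch of typical use, with invented names:

```rust
struct Config {
    verbose: bool,
}

static CONFIG: crate::spin::Once<Config> = crate::spin::Once::INIT;

fn config() -> &'static Config {
    // The first caller wins the INCOMPLETE -> RUNNING CAS and runs the
    // closure; concurrent callers spin on RUNNING, and afterwards every
    // caller gets the same &'static reference out of COMPLETE.
    CONFIG.call_once(|| Config { verbose: false })
}
```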
diff --git a/tracing-core/src/stdlib.rs b/tracing-core/src/stdlib.rs
index 6d78456091..4a1c17c2b8 100644
--- a/tracing-core/src/stdlib.rs
+++ b/tracing-core/src/stdlib.rs
@@ -49,9 +49,9 @@ mod no_std {
     }
 
     pub(crate) mod sync {
+        pub(crate) use crate::spin::MutexGuard;
         pub(crate) use alloc::sync::*;
         pub(crate) use core::sync::*;
-        pub(crate) use spin::MutexGuard;
 
         /// This wraps `spin::Mutex` to return a `Result`, so that it can be
         /// used with code written against `std::sync::Mutex`.
@@ -60,17 +60,17 @@ mod no_std {
         /// by `lock` will always be `Ok`.
         #[derive(Debug, Default)]
         pub(crate) struct Mutex<T> {
-            inner: spin::Mutex<T>,
+            inner: crate::spin::Mutex<T>,
        }
 
         impl<T> Mutex<T> {
             pub(crate) fn new(data: T) -> Self {
                 Self {
-                    inner: spin::Mutex::new(data),
+                    inner: crate::spin::Mutex::new(data),
                 }
             }
 
-            pub(crate) fn lock(&self) -> Result<MutexGuard<'_, T>, ()> {
+            pub(crate) fn lock(&self) -> Result<crate::spin::MutexGuard<'_, T>, ()> {
                 Ok(self.inner.lock())
             }
         }
diff --git a/tracing-subscriber/src/layer.rs b/tracing-subscriber/src/layer.rs
index 89ffe12bef..8b189c9ab6 100644
--- a/tracing-subscriber/src/layer.rs
+++ b/tracing-subscriber/src/layer.rs
@@ -7,7 +7,7 @@ use tracing_core::{
 };
 
 #[cfg(feature = "registry")]
-use crate::registry::{self, LookupMetadata, LookupSpan};
+use crate::registry::{self, LookupMetadata, LookupSpan, Registry};
 use std::{any::TypeId, marker::PhantomData};
 
 /// A composable handler for `tracing` events.
@@ -494,6 +494,13 @@ where
     }
 
     fn try_close(&self, id: span::Id) -> bool {
+        #[cfg(feature = "registry")]
+        let subscriber = &self.inner as &dyn Subscriber;
+        #[cfg(feature = "registry")]
+        let _guard = subscriber
+            .downcast_ref::<Registry>()
+            .and_then(|registry| Some(registry.start_close(id.clone())));
+
         let id2 = id.clone();
         if self.inner.try_close(id) {
             self.layer.on_close(id2, self.ctx());
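The `downcast_ref` call above is how a `Layered` discovers, at runtime, whether its type-erased inner subscriber is (or wraps) a `Registry`; `Layered`'s `downcast_raw` forwards the probe through nested layers. A hedged sketch of that pattern in isolation (the free function is invented for illustration):

```rust
use tracing_core::Subscriber;
use tracing_subscriber::registry::Registry;

fn probe(subscriber: &dyn Subscriber) {
    // Yields `None` for subscribers that neither are nor wrap a Registry.
    if let Some(registry) = subscriber.downcast_ref::<Registry>() {
        // Registry-specific APIs (like the crate-private `start_close`
        // used above) are only reachable on this path.
        let _ = registry;
    }
}
```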
diff --git a/tracing-subscriber/src/registry/sharded.rs b/tracing-subscriber/src/registry/sharded.rs
index 0ca6effdc7..e5ae5f51f4 100644
--- a/tracing-subscriber/src/registry/sharded.rs
+++ b/tracing-subscriber/src/registry/sharded.rs
@@ -9,7 +9,7 @@ use crate::{
     sync::RwLock,
 };
 use std::{
-    cell::RefCell,
+    cell::{Cell, RefCell},
     sync::atomic::{fence, AtomicUsize, Ordering},
 };
 use tracing_core::{
@@ -89,6 +89,33 @@ fn id_to_idx(id: &Id) -> usize {
     id.into_u64() as usize - 1
 }
 
+/// A guard that tracks how many [`Registry`]-backed `Layer`s have
+/// processed an `on_close` event.
+///
+/// This is needed to enable a [`Registry`]-backed `Layer` to access span
+/// data after the `Layer` has received the `on_close` callback.
+///
+/// Once all `Layer`s have processed this event, the [`Registry`] knows
+/// that it is able to safely remove the span tracked by `id`. `CloseGuard`
+/// accomplishes this through a two-step process:
+/// 1. Whenever a [`Registry`]-backed `Layer::on_close` method is
+///    called, `Registry::start_close` is called.
+///    `Registry::start_close` increments a thread-local `CLOSE_COUNT`
+///    by 1 and returns a `CloseGuard`.
+/// 2. The `CloseGuard` is dropped at the end of `Layer::on_close`. On
+///    drop, `CloseGuard` checks the thread-local `CLOSE_COUNT`. If
+///    `CLOSE_COUNT` is 0, the `CloseGuard` removes the span with the
+///    `id` from the registry, as all `Layer`s that might have seen the
+///    `on_close` notification have processed it. If `CLOSE_COUNT` is
+///    greater than 0, `CloseGuard` decrements the counter by one and
+///    _does not_ remove the span from the [`Registry`].
+///
+/// [`Registry`]: ./struct.Registry.html
+pub(crate) struct CloseGuard<'a> {
+    id: Id,
+    registry: &'a Registry,
+}
+
 impl Registry {
     fn insert(&self, s: DataInner) -> Option<usize> {
         self.spans.insert(s)
@@ -97,9 +124,31 @@ impl Registry {
     fn get(&self, id: &Id) -> Option<Guard<'_, DataInner>> {
         self.spans.get(id_to_idx(id))
     }
+
+    /// Returns a guard which tracks how many `Layer`s have
+    /// processed an `on_close` notification via the `CLOSE_COUNT` thread-local.
+    /// For additional details, see [`CloseGuard`].
+    ///
+    /// [`CloseGuard`]: ./struct.CloseGuard.html
+    pub(crate) fn start_close(&self, id: Id) -> CloseGuard<'_> {
+        CLOSE_COUNT.with(|count| {
+            let c = count.get();
+            count.set(c + 1);
+        });
+        CloseGuard {
+            id,
+            registry: &self,
+        }
+    }
 }
 
 thread_local! {
+    /// `CLOSE_COUNT` is the thread-local counter used by `CloseGuard` to
+    /// track how many layers have processed the close.
+    /// For additional details, see [`CloseGuard`].
+    ///
+    /// [`CloseGuard`]: ./struct.CloseGuard.html
+    static CLOSE_COUNT: Cell<usize> = Cell::new(0);
     static CURRENT_SPANS: RefCell<SpanStack> = RefCell::new(SpanStack::new());
 }
 
@@ -203,7 +252,6 @@ impl Subscriber for Registry {
         // from std::Arc); this ensures that all other `try_close` calls on
         // other threads happen-before we actually remove the span.
         fence(Ordering::Acquire);
-        self.spans.remove(id_to_idx(&id));
         true
     }
 }
@@ -247,6 +295,22 @@ impl Drop for DataInner {
     }
 }
 
+impl<'a> Drop for CloseGuard<'a> {
+    fn drop(&mut self) {
+        // If this returns with an error, we are already panicking. At
+        // this point, there's nothing we can really do to recover
+        // except by avoiding a double-panic.
+        let _ = CLOSE_COUNT.try_with(|count| {
+            let c = count.get();
+            if c > 0 {
+                count.set(c - 1);
+            } else {
+                self.registry.spans.remove(id_to_idx(&self.id));
+            }
+        });
+    }
+}
+
 // === impl Data ===
 
 impl<'a> SpanData<'a> for Data<'a> {
@@ -270,3 +334,89 @@ impl<'a> SpanData<'a> for Data<'a> {
         ExtensionsMut::new(self.inner.extensions.write().expect("Mutex poisoned"))
     }
 }
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::Registry;
+    use crate::{layer::Context, registry::LookupSpan, Layer};
+    use std::sync::atomic::{AtomicBool, Ordering};
+    use tracing::{self, subscriber::with_default};
+    use tracing_core::{
+        span::{Attributes, Id},
+        Subscriber,
+    };
+
+    struct AssertionLayer;
+    impl<S> Layer<S> for AssertionLayer
+    where
+        S: Subscriber + for<'a> LookupSpan<'a>,
+    {
+        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
+            assert!(&ctx.span(&id).is_some());
+        }
+    }
+
+    #[test]
+    fn single_layer_can_access_closed_span() {
+        let subscriber = AssertionLayer.with_subscriber(Registry::default());
+
+        with_default(subscriber, || {
+            let span = tracing::debug_span!("span");
+            drop(span);
+        });
+    }
+
+    #[test]
+    fn multiple_layers_can_access_closed_span() {
+        let subscriber = AssertionLayer
+            .and_then(AssertionLayer)
+            .with_subscriber(Registry::default());
+
+        with_default(subscriber, || {
+            let span = tracing::debug_span!("span");
+            drop(span);
+        });
+    }
+
+    #[test]
+    fn span_is_removed_from_registry() {
+        static IS_REMOVED: AtomicBool = AtomicBool::new(false);
+
+        struct ClosingLayer;
+        impl<S> Layer<S> for ClosingLayer
+        where
+            S: Subscriber + for<'a> LookupSpan<'a>,
+        {
+            fn new_span(&self, _: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
+                let span = ctx.span(id).expect("Missing span; this is a bug");
+                let mut extensions = span.extensions_mut();
+                extensions.insert(ClosingSpan);
+            }
+
+            fn on_close(&self, id: Id, ctx: Context<'_, S>) {
+                assert!(&ctx.span(&id).is_some());
+                let span = &ctx.span(&id).unwrap();
+                let extensions = span.extensions();
+                assert!(extensions.get::<ClosingSpan>().is_some());
+            }
+        }
+
+        struct ClosingSpan;
+        impl Drop for ClosingSpan {
+            fn drop(&mut self) {
+                IS_REMOVED.store(true, Ordering::Release)
+            }
+        }
+
+        let subscriber = AssertionLayer
+            .and_then(ClosingLayer)
+            .with_subscriber(Registry::default());
+
+        with_default(subscriber, || {
+            let span = tracing::debug_span!("span");
+            drop(span);
+        });
+
+        assert!(IS_REMOVED.load(Ordering::Acquire) == true);
+    }
+}
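Stepping back from the tests: the counter manipulation in `start_close` and `CloseGuard::drop` is easier to follow in isolation. Below is a standalone model of that bookkeeping (a sketch for illustration only, not code from this diff; function names are invented):

```rust
use std::cell::Cell;

thread_local! {
    static CLOSE_COUNT: Cell<usize> = Cell::new(0);
}

/// Mirrors `Registry::start_close`: bump the per-thread counter before
/// a layer's `on_close` runs.
fn enter_close() {
    CLOSE_COUNT.with(|c| c.set(c.get() + 1));
}

/// Mirrors `CloseGuard::drop`: a positive counter means other guards
/// are still pending, so only decrement; a zero counter means this
/// guard is the last one, and the span may be removed from the slab.
fn exit_close() -> bool {
    CLOSE_COUNT.with(|c| {
        let n = c.get();
        if n > 0 {
            c.set(n - 1);
            false // still pending; do not remove
        } else {
            true // last guard; remove the span
        }
    })
}
```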
diff --git a/tracing/Cargo.toml b/tracing/Cargo.toml
index dfa19ba2b3..ffdcb37881 100644
--- a/tracing/Cargo.toml
+++ b/tracing/Cargo.toml
@@ -27,7 +27,7 @@ keywords = ["logging", "tracing", "metrics", "async"]
 edition = "2018"
 
 [dependencies]
-tracing-core = { version = "0.1.7", default-features = false }
+tracing-core = { path = "../tracing-core", version = "0.1.8", default-features = false }
 log = { version = "0.4", optional = true }
 tracing-attributes = "0.1.5"
 cfg-if = "0.1.10"
@@ -71,6 +71,3 @@ harness = false
 [badges]
 azure-devops = { project = "tracing/tracing", pipeline = "tokio-rs.tracing", build = "1" }
 maintenance = { status = "actively-developed" }
-
-[target.'cfg(not(feature = "std"))'.dependencies]
-spin = "0.5"
diff --git a/tracing/src/lib.rs b/tracing/src/lib.rs
index 2e9f8bf772..79849678f5 100644
--- a/tracing/src/lib.rs
+++ b/tracing/src/lib.rs
@@ -701,7 +701,7 @@ pub mod __macro_support {
     pub use crate::stdlib::sync::Once;
 
     #[cfg(not(feature = "std"))]
-    pub type Once = spin::Once<()>;
+    pub type Once = tracing_core::Once<()>;
 }
 
 mod sealed {
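With that alias in place, macro-generated callsite registration keeps a single code path regardless of the build. A hedged sketch of the pattern the macros rely on (the static name is invented):

```rust
// With `std`, this resolves to `std::sync::Once`; without it, to the
// vendored `tracing_core::Once<()>`. Both expose a compatible
// const `new` and a `call_once` that runs its closure exactly once.
use tracing::__macro_support::Once;

static REGISTRATION: Once = Once::new();

fn register_callsite() {
    REGISTRATION.call_once(|| {
        // one-time registration work happens here
    });
}
```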