From 99e51c1980076315f8b6933992122768baba886c Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Sun, 6 Jun 2021 09:54:24 -0700 Subject: [PATCH] Update lib and tracing module docs with examples (#563) --- README.md | 12 +- opentelemetry-otlp/src/lib.rs | 2 +- opentelemetry-semantic-conventions/src/lib.rs | 2 - opentelemetry/README.md | 9 +- opentelemetry/src/global/mod.rs | 11 +- opentelemetry/src/global/trace.rs | 14 +- opentelemetry/src/lib.rs | 111 ++++++--- .../src/sdk/metrics/aggregators/ddsketch.rs | 2 +- opentelemetry/src/sdk/trace/runtime.rs | 4 +- opentelemetry/src/sdk/trace/span_processor.rs | 22 +- opentelemetry/src/sdk/trace/tracer.rs | 2 +- opentelemetry/src/trace/mod.rs | 228 +++++++++++------- opentelemetry/src/trace/span_context.rs | 2 +- opentelemetry/src/trace/tracer.rs | 2 +- 14 files changed, 270 insertions(+), 153 deletions(-) diff --git a/README.md b/README.md index fff2cdebd1..f8ca9944fd 100644 --- a/README.md +++ b/README.md @@ -34,17 +34,18 @@ observability tools. ## Getting Started ```rust -use opentelemetry::{sdk::export::trace::stdout, trace::Tracer}; +use opentelemetry::{global, sdk::export::trace::stdout, trace::Tracer}; -fn main() -> Result<(), Box> { - // Create a new instrumentation pipeline +fn main() { + // Create a new trace pipeline that prints to stdout let tracer = stdout::new_pipeline().install_simple(); tracer.in_span("doing_work", |cx| { // Traced app logic here... }); - Ok(()) + // Shutdown trace pipeline + global::shutdown_tracer_provider(); } ``` @@ -76,7 +77,7 @@ In particular, the following crates are likely to be of interest: otel conventions. - [`opentelemetry-stackdriver`] provides an exporter for Google's [Cloud Trace] (which used to be called StackDriver). - + Additionally, there are also several third-party crates which are not maintained by the `opentelemetry` project. These include: @@ -89,7 +90,6 @@ maintained by the `opentelemetry` project. These include: - [`opentelemetry-tide`] provides integration for the [`Tide`] web server and ecosystem. - If you're the maintainer of an `opentelemetry` ecosystem crate not listed above, please let us know! We'd love to add your project to the list! diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index c989041932..9892e56357 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -394,7 +394,7 @@ impl TonicPipelineBuilder { Ok(build_simple_with_exporter(exporter, self.trace_config)) } - /// Install a trace exporter using [tonic] as grpc lazer and a batch span processor using the + /// Install a trace exporter using [tonic] as grpc layer and a batch span processor using the /// specified runtime. /// /// Returns a [`Tracer`] with the name `opentelemetry-otlp` and current crate version. diff --git a/opentelemetry-semantic-conventions/src/lib.rs b/opentelemetry-semantic-conventions/src/lib.rs index a29fca0b67..c3c679726a 100644 --- a/opentelemetry-semantic-conventions/src/lib.rs +++ b/opentelemetry-semantic-conventions/src/lib.rs @@ -1,5 +1,3 @@ -//! # OpenTelemetry Semantic Conventions -//! //! OpenTelemetry semantic conventions are agreed standardized naming patterns //! for OpenTelemetry things. This crate aims to be the centralized place to //! interact with these conventions. diff --git a/opentelemetry/README.md b/opentelemetry/README.md index 5938f9509f..010f61758f 100644 --- a/opentelemetry/README.md +++ b/opentelemetry/README.md @@ -33,17 +33,18 @@ observability tools. 
## Getting Started ```rust -use opentelemetry::{sdk::export::trace::stdout, trace::Tracer}; +use opentelemetry::{global, sdk::export::trace::stdout, trace::Tracer}; -fn main() -> Result<(), Box> { - // Create a new instrumentation pipeline +fn main() { + // Create a new trace pipeline that prints to stdout let tracer = stdout::new_pipeline().install_simple(); tracer.in_span("doing_work", |cx| { // Traced app logic here... }); - Ok(()) + // Shutdown trace pipeline + global::shutdown_tracer_provider(); } ``` diff --git a/opentelemetry/src/global/mod.rs b/opentelemetry/src/global/mod.rs index 1ab560f8ac..d874765fd2 100644 --- a/opentelemetry/src/global/mod.rs +++ b/opentelemetry/src/global/mod.rs @@ -64,7 +64,7 @@ //! # } //! ``` //! -//! [installing a trace pipeline]: crate::sdk::export::trace::stdout::PipelineBuilder::install +//! [installing a trace pipeline]: crate::sdk::export::trace::stdout::PipelineBuilder::install_simple //! [`TracerProvider`]: crate::trace::TracerProvider //! [`Span`]: crate::trace::Span //! @@ -80,7 +80,7 @@ //! //! ### Usage in Applications //! -//! Applications configure their tracer either by [installing a metrics pipeline], +//! Applications configure their meter either by [installing a metrics pipeline], //! or calling [`set_meter_provider`]. //! //! ``` @@ -99,8 +99,8 @@ //! //! fn do_something_instrumented() { //! // Then you can get a named tracer instance anywhere in your codebase. -//! let tracer = global::meter("my-component"); -//! let counter = tracer.u64_counter("my_counter").init(); +//! let meter = global::meter("my-component"); +//! let counter = meter.u64_counter("my_counter").init(); //! //! // record metrics //! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); @@ -117,7 +117,6 @@ //! ``` //! # #[cfg(feature="metrics")] //! # { -//! use opentelemetry::trace::Tracer; //! use opentelemetry::{global, KeyValue}; //! //! pub fn my_traced_library_function() { @@ -132,7 +131,7 @@ //! # } //! ``` //! -//! [installing a metrics pipeline]: crate::sdk::export::metrics::stdout::StdoutExporterBuilder::try_init +//! [installing a metrics pipeline]: crate::sdk::export::metrics::stdout::StdoutExporterBuilder::init //! [`MeterProvider`]: crate::metrics::MeterProvider //! [`set_meter_provider`]: crate::global::set_meter_provider diff --git a/opentelemetry/src/global/trace.rs b/opentelemetry/src/global/trace.rs index 5dbeb649f4..b78ef8e45d 100644 --- a/opentelemetry/src/global/trace.rs +++ b/opentelemetry/src/global/trace.rs @@ -328,11 +328,11 @@ pub fn force_flush_tracer_provider() { // threads Use cargo test -- --ignored --test-threads=1 to run those tests. 
mod tests { use super::*; + #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] + use crate::runtime; + #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::sdk::trace::TraceRuntime; - use crate::{ - runtime, - trace::{NoopTracer, Tracer}, - }; + use crate::trace::{NoopTracer, Tracer}; use std::{ fmt::Debug, io::Write, @@ -480,6 +480,7 @@ mod tests { assert!(second_resp.contains("thread 2")); } + #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] fn build_batch_tracer_provider( assert_writer: AssertWriter, runtime: R, @@ -501,6 +502,7 @@ mod tests { .build() } + #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] async fn test_set_provider_in_tokio(runtime: R) -> AssertWriter { let buffer = AssertWriter::new(); let _ = set_tracer_provider(build_batch_tracer_provider(buffer.clone(), runtime)); @@ -529,6 +531,7 @@ mod tests { // Test if the multiple thread tokio runtime could exit successfully when not force flushing spans #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "requires --test-threads=1"] + #[cfg(feature = "rt-tokio")] async fn test_set_provider_multiple_thread_tokio() { let assert_writer = test_set_provider_in_tokio(runtime::Tokio).await; assert_eq!(assert_writer.len(), 0); @@ -537,6 +540,7 @@ mod tests { // Test if the multiple thread tokio runtime could exit successfully when force flushing spans #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "requires --test-threads=1"] + #[cfg(feature = "rt-tokio")] async fn test_set_provider_multiple_thread_tokio_shutdown() { let assert_writer = test_set_provider_in_tokio(runtime::Tokio).await; shutdown_tracer_provider(); @@ -562,6 +566,7 @@ mod tests { // Test if the single thread tokio runtime could exit successfully when not force flushing spans #[tokio::test] #[ignore = "requires --test-threads=1"] + #[cfg(feature = "rt-tokio-current-thread")] async fn test_set_provider_single_thread_tokio() { let assert_writer = test_set_provider_in_tokio(runtime::TokioCurrentThread).await; assert_eq!(assert_writer.len(), 0) @@ -570,6 +575,7 @@ mod tests { // Test if the single thread tokio runtime could exit successfully when force flushing spans. #[tokio::test] #[ignore = "requires --test-threads=1"] + #[cfg(feature = "rt-tokio-current-thread")] async fn test_set_provider_single_thread_tokio_shutdown() { let assert_writer = test_set_provider_in_tokio(runtime::TokioCurrentThread).await; shutdown_tracer_provider(); diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs index 7c9d0ade5c..9771776512 100644 --- a/opentelemetry/src/lib.rs +++ b/opentelemetry/src/lib.rs @@ -1,5 +1,3 @@ -//! The Rust [OpenTelemetry](https://opentelemetry.io/) implementation. -//! //! OpenTelemetry provides a single set of APIs, libraries, agents, and collector //! services to capture distributed traces and metrics from your application. You //! can analyze them using [Prometheus], [Jaeger], and other observability tools. @@ -10,30 +8,95 @@ //! [Jaeger]: https://www.jaegertracing.io //! [msrv]: #supported-rust-versions //! -//! ## Getting Started +//! # Getting Started //! //! ```no_run //! # #[cfg(feature = "trace")] //! # { -//! use opentelemetry::{sdk::export::trace::stdout, trace::Tracer, global}; +//! use opentelemetry::{global, sdk::export::trace::stdout, trace::Tracer}; //! -//! fn main() -> Result<(), Box> { -//! // Create a new instrumentation pipeline +//! fn main() { +//! 
// Create a new trace pipeline that prints to stdout //! let tracer = stdout::new_pipeline().install_simple(); //! //! tracer.in_span("doing_work", |cx| { //! // Traced app logic here... //! }); //! -//! global::shutdown_tracer_provider(); // sending remaining spans -//! -//! Ok(()) -//! } +//! // Shutdown trace pipeline +//! global::shutdown_tracer_provider(); //! } +//! # } +//! ``` +//! +//! See the [examples] directory for different integration patterns. +//! +//! [examples]: https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples +//! +//! # Traces +//! +//! The [`trace`] module includes types for tracking the progression of a single +//! request while it is handled by services that make up an application. A trace +//! is a tree of [`Span`]s which are objects that represent the work being done +//! by individual services or components involved in a request as it flows +//! through a system. +//! +//! ### Creating and exporting spans +//! +//! ``` +//! # #[cfg(feature = "trace")] +//! # { +//! use opentelemetry::{global, trace::{Span, Tracer}, KeyValue}; +//! +//! // get a tracer from a provider +//! let tracer = global::tracer("my_service"); +//! +//! // start a new span +//! let mut span = tracer.start("my_span"); +//! +//! // set some attributes +//! span.set_attribute(KeyValue::new("http.client_ip", "83.164.160.102")); +//! +//! // perform some more work... +//! +//! // end or drop the span to export +//! span.end(); +//! # } +//! ``` +//! +//! See the [`trace`] module docs for more information on creating and managing +//! spans. +//! +//! [`Span`]: crate::trace::Span +//! +//! # Metrics +//! +//! Note: the metrics specification is **still in progress** and **subject to major +//! changes**. +//! +//! The [`metrics`] module includes types for recording measurements about a +//! service at runtime. +//! +//! ### Creating instruments and recording measurements +//! //! ``` +//! # #[cfg(feature = "metrics")] +//! # { +//! use opentelemetry::{global, KeyValue}; //! -//! See the [examples](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples) -//! directory for different integration patterns. +//! // get a meter from a provider +//! let meter = global::meter("my_service"); +//! +//! // create an instrument +//! let counter = meter.u64_counter("my_counter").init(); +//! +//! // record a measurement +//! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]); +//! # } +//! ``` +//! +//! See the [`metrics`] module docs for more information on creating and +//! managing instruments. //! //! ## Crate Feature Flags //! @@ -54,28 +117,6 @@ //! [async-std]: https://crates.io/crates/async-std //! [serde]: https://crates.io/crates/serde //! -//! ## Working with runtimes -//! -//! Opentelemetry API & SDK supports different runtimes. When working with async runtime, we recommend -//! to use batch span processors where the spans will be sent in batch, reducing the number of requests -//! and resource needed. -//! -//! Batch span processors need to run a background task to collect and send spans. Different runtimes -//! need different ways to handle the background task. Using a `Runtime` that's not compatible with the -//! underlying runtime can cause deadlock. -//! -//! ### Tokio -//! -//! Tokio currently offers two different schedulers. One is `current_thread_scheduler`, the other is -//! `multiple_thread_scheduler`. Both of them default to use batch span processors to install span exporters. -//! -//! But for `current_thread_scheduler`. 
It can cause the program to hang forever if we schedule the backgroud -//! task with other tasks in the same runtime. Thus, users should enable `rt-tokio-current-thread` feature -//! to ask the background task be scheduled on a different runtime on a different thread. -//! -//! Note that by default `#[tokio::test]` uses `current_thread_scheduler` and should use `rt-tokio-current-thread` -//! feature. -//! //! ## Related Crates //! //! In addition to `opentelemetry`, the [`open-telemetry/opentelemetry-rust`] @@ -180,7 +221,7 @@ pub mod global; pub mod sdk; #[cfg(feature = "testing")] -#[allow(missing_docs)] +#[doc(hidden)] pub mod testing; pub mod baggage; diff --git a/opentelemetry/src/sdk/metrics/aggregators/ddsketch.rs b/opentelemetry/src/sdk/metrics/aggregators/ddsketch.rs index 9930d10c8e..c0c366a30b 100644 --- a/opentelemetry/src/sdk/metrics/aggregators/ddsketch.rs +++ b/opentelemetry/src/sdk/metrics/aggregators/ddsketch.rs @@ -7,7 +7,7 @@ //! //! DDSKetch, on the contrary, employs relative error rate that could work well on long tail dataset. //! -//! The detail of this algorithm can be found in https://arxiv.org/pdf/1908.10693 +//! The detail of this algorithm can be found in use std::{ any::Any, diff --git a/opentelemetry/src/sdk/trace/runtime.rs b/opentelemetry/src/sdk/trace/runtime.rs index 118f59041a..6954561838 100644 --- a/opentelemetry/src/sdk/trace/runtime.rs +++ b/opentelemetry/src/sdk/trace/runtime.rs @@ -2,7 +2,7 @@ //! Trace runtime is an extension to [`Runtime`]. Currently it provides a channel that used //! by [`BatchSpanProcessor`]. //! -//! [`BatchSpanProcessor`]: crate::sdk::trace::span_processor::BatchSpanProcessor +//! [`BatchSpanProcessor`]: crate::sdk::trace::BatchSpanProcessor //! [`Runtime`]: crate::runtime::Runtime #[cfg(feature = "rt-async-std")] use crate::runtime::AsyncStd; @@ -34,7 +34,7 @@ const CHANNEL_CLOSED_ERROR: &str = /// Trace runtime is an extension to [`Runtime`]. Currently it provides a channel that used /// by [`BatchSpanProcessor`]. /// -/// [`BatchSpanProcessor`]: crate::sdk::trace::span_processor::BatchSpanProcessor +/// [`BatchSpanProcessor`]: crate::sdk::trace::BatchSpanProcessor /// [`Runtime`]: crate::runtime::Runtime pub trait TraceRuntime: Runtime { /// A future stream to receive the batch messages from channels. diff --git a/opentelemetry/src/sdk/trace/span_processor.rs b/opentelemetry/src/sdk/trace/span_processor.rs index b77cba9ac7..6a5feec5d9 100644 --- a/opentelemetry/src/sdk/trace/span_processor.rs +++ b/opentelemetry/src/sdk/trace/span_processor.rs @@ -171,6 +171,24 @@ impl SpanProcessor for SimpleSpanProcessor { /// A [`SpanProcessor`] that asynchronously buffers finished spans and reports /// them at a preconfigured interval. /// +/// Batch span processors need to run a background task to collect and send +/// spans. Different runtimes need different ways to handle the background task. +/// +/// Note: Configuring an opentelemetry `Runtime` that's not compatible with the +/// underlying runtime can cause deadlocks (see tokio section). +/// +/// ### Use with Tokio +/// +/// Tokio currently offers two different schedulers. One is +/// `current_thread_scheduler`, the other is `multiple_thread_scheduler`. Both +/// of them default to use batch span processors to install span exporters. +/// +/// Tokio's `current_thread_scheduler` can cause the program to hang forever if +/// blocking work is scheduled with other tasks in the same runtime. 
To avoid +/// this, be sure to enable the `rt-tokio-current-thread` feature in this crate +/// if you are using that runtime (e.g. users of actix-web), and blocking tasks +/// will then be scheduled on a different thread. +/// /// # Examples /// /// This processor can be configured with an [`executor`] of your choice to @@ -191,9 +209,7 @@ impl SpanProcessor for SimpleSpanProcessor { /// // Configure your preferred exporter /// let exporter = apitrace::NoopSpanExporter::new(); /// -/// // Then build a batch processor. You can use whichever executor you have available, for -/// // example if you are using `async-std` instead of `tokio` you can replace the spawn and -/// // interval functions with `async_std::task::spawn` and `async_std::stream::interval`. +/// // Create a batch span processor using an exporter and a runtime /// let batch = sdktrace::BatchSpanProcessor::builder(exporter, runtime::Tokio) /// .with_max_queue_size(4096) /// .build(); diff --git a/opentelemetry/src/sdk/trace/tracer.rs b/opentelemetry/src/sdk/trace/tracer.rs index 8824295363..d038e10da5 100644 --- a/opentelemetry/src/sdk/trace/tracer.rs +++ b/opentelemetry/src/sdk/trace/tracer.rs @@ -6,7 +6,7 @@ //! The `Tracer` is responsible for tracking the currently active `Span`, //! and exposes methods for creating and activating new `Spans`. //! -//! Docs: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-tracing.md#tracer +//! Docs: use crate::sdk::trace::SpanLimits; use crate::sdk::{ trace::{ diff --git a/opentelemetry/src/trace/mod.rs b/opentelemetry/src/trace/mod.rs index c64d7a8d44..f41b78cae1 100644 --- a/opentelemetry/src/trace/mod.rs +++ b/opentelemetry/src/trace/mod.rs @@ -1,115 +1,171 @@ -//! # OpenTelemetry Tracing API. +//! The `trace` module includes types for tracking the progression of a single +//! request while it is handled by services that make up an application. A trace +//! is a tree of [`Span`]s which are objects that represent the work being done +//! by individual services or components involved in a request as it flows +//! through a system. This module implements the OpenTelemetry [trace +//! specification]. //! -//! The tracing API consists of a few main traits: -//! -//! * The `Tracer` trait which describes all tracing operations. -//! * The `Span` trait with is a mutable object storing information about the -//! current operation execution. -//! * The `SpanContext` struct is the portion of a `Span` which must be -//! serialized and propagated along side of a distributed context +//! [trace specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.3.0/specification/trace/api.md //! -//! ## Tracer +//! ## Getting Started //! -//! The OpenTelemetry library achieves in-process context propagation of `Span`s by -//! way of the `Tracer`. +//! In application code: //! -//! The `Tracer` is responsible for tracking the currently active `Span`, and -//! exposes methods for creating and activating new `Span`s. The `Tracer` is -//! configured with `Propagator`s which support transferring span context across -//! process boundaries. +//! ```no_run +//! # #[cfg(feature = "trace")] +//! # { +//! use opentelemetry::{global, sdk::export::trace::stdout, trace::Tracer}; //! -//! `Tracer`s are generally expected to be used as singletons. Implementations -//! SHOULD provide a single global default `Tracer`. +//! fn main() { +//! // Create a new trace pipeline that prints to stdout +//! let tracer = stdout::new_pipeline().install_simple(); //! 
-//! Some applications may require multiple `Tracer` instances, e.g. to create
-//! `Span`s on behalf of other applications. Implementations MAY provide a global
-//! registry of `Tracer`s for such applications.
+//!     tracer.in_span("doing_work", |cx| {
+//!         // Traced app logic here...
+//!     });
 //!
-//! ## Span
+//!     // Shutdown trace pipeline
+//!     global::shutdown_tracer_provider();
+//! }
+//! # }
+//! ```
 //!
-//! A `Span` represents a single operation within a trace. Spans can be nested to
-//! form a trace tree. Each trace contains a root span, which typically describes
-//! the end-to-end latency and, optionally, one or more sub-spans for its
-//! sub-operations.
+//! In library code:
 //!
-//! `Span`s encapsulate:
+//! ```
+//! # #[cfg(feature = "trace")]
+//! # {
+//! use opentelemetry::{global, trace::{Span, Tracer, TracerProvider}};
 //!
-//! - The span name
-//! - An immutable `SpanContext` that uniquely identifies the `Span`
-//! - A parent span in the form of a `SpanContext`, or None
-//! - A start timestamp
-//! - An end timestamp
-//! - An ordered mapping of `Attribute`s
-//! - A list of `Link`s to other `Span`s
-//! - A list of timestamped `Event`s
-//! - A `Status`.
+//! fn my_library_function() {
+//!     // Use the global tracer provider to get access to the user-specified
+//!     // tracer configuration
+//!     let tracer_provider = global::tracer_provider();
 //!
-//! The _span name_ is a human-readable string which concisely identifies the work
-//! represented by the `Span`, for example, an RPC method name, a function name,
-//! or the name of a subtask or stage within a larger computation. The span name
-//! should be the most general string that identifies a (statistically) interesting
-//! _class of Spans_, rather than individual Span instances. That is, "get_user" is
-//! a reasonable name, while "get_user/314159", where "314159" is a user ID, is not
-//! a good name due to its high cardinality.
+//!     // Get a tracer for this library
+//!     let tracer = tracer_provider.get_tracer("my_name", Some(env!("CARGO_PKG_VERSION")));
 //!
-//! For example, here are potential span names for an endpoint that gets a
-//! hypothetical account information:
+//!     // Create spans
+//!     let mut span = tracer.start("doing_work");
 //!
-//! | Span Name                 | Guidance     |
-//! | ------------------------- | ------------ |
-//! | `get`                     | Too general  |
-//! | `get_account/42`          | Too specific |
-//! | `get_account`             | Good, and account_id=42 would make a nice Span attribute |
-//! | `get_account/{accountId}` | Also good (using the "HTTP route") |
+//!     // Do work...
 //!
-//! The `Span`'s start and end timestamps reflect the elapsed real time of the
-//! operation. A `Span`'s start time SHOULD be set to the current time on span
-//! creation. After the `Span` is created, it SHOULD be possible to
-//! change the its name, set its `Attribute`s, and add `Link`s and `Event`s. These
-//! MUST NOT be changed after the `Span`'s end time has been set.
+//!     // End the span
+//!     span.end();
+//! }
+//! # }
+//! ```
 //!
-//! `Span`s are not meant to be used to propagate information within a process. To
-//! prevent misuse, implementations SHOULD NOT provide access to a `Span`'s
-//! attributes besides its `SpanContext`.
+//! ## Overview
 //!
-//! Vendors may implement the `Span` interface to effect vendor-specific logic.
-//! However, alternative implementations MUST NOT allow callers to create `Span`s
-//! directly. All `Span`s MUST be created via a `Tracer`.
+//! The tracing API consists of three main traits:
 //!
-//! ## SpanContext
+//! * [`TracerProvider`]s are the entry point of the API. They provide access to
+//!   `Tracer`s.
+//! * [`Tracer`]s are types responsible for creating `Span`s.
+//! * [`Span`]s provide the API to trace an operation.
 //!
-//! A `SpanContext` represents the portion of a `Span` which must be serialized and
-//! propagated along side of a distributed context. `SpanContext`s are immutable.
-//! `SpanContext`.
+//! ## Working with Async Runtimes
 //!
-//! The OpenTelemetry `SpanContext` representation conforms to the [w3c TraceContext
-//! specification](https://www.w3.org/TR/trace-context/). It contains two
-//! identifiers - a `TraceId` and a `SpanId` - along with a set of common
-//! `TraceFlags` and system-specific `TraceState` values.
+//! Exporting spans often involves sending data over a network or performing
+//! other I/O tasks. OpenTelemetry allows you to schedule these tasks using
+//! whichever runtime you are already using, such as [Tokio] or [async-std].
+//! When using an async runtime it's best to use the [`BatchSpanProcessor`],
+//! where spans are sent in batches rather than one at a time as they end,
+//! which is often more efficient.
 //!
-//! `TraceId` A valid trace identifier is a non-zero `u128`
+//! [`BatchSpanProcessor`]: crate::sdk::trace::BatchSpanProcessor
+//! [Tokio]: https://tokio.rs
+//! [async-std]: https://async.rs
 //!
-//! `SpanId` A valid span identifier is a non-zero `u64` byte.
+//! ## Managing Active Spans
 //!
-//! `TraceFlags` contain details about the trace. Unlike Tracestate values,
-//! TraceFlags are present in all traces. Currently, the only `TraceFlags` is a
-//! boolean `sampled`
-//! [flag](https://www.w3.org/TR/trace-context/#trace-flags).
+//! Spans can be marked as "active" for a given [`Context`], and all newly
+//! created spans will automatically be children of the currently active span.
 //!
-//! `Tracestate` carries system-specific configuration data, represented as a list
-//! of key-value pairs. TraceState allows multiple tracing systems to participate in
-//! the same trace.
+//! The active span for a given thread can be managed via [`get_active_span`]
+//! and [`mark_span_as_active`].
 //!
-//! `IsValid` is a boolean flag which returns true if the SpanContext has a non-zero
-//! TraceID and a non-zero SpanID.
-//!
-//! `IsRemote` is a boolean flag which returns true if the SpanContext was propagated
-//! from a remote parent. When creating children from remote spans, their IsRemote
-//! flag MUST be set to false.
+//! [`Context`]: crate::Context
 //!
-//! Please review the W3C specification for details on the [Tracestate
-//! field](https://www.w3.org/TR/trace-context/#tracestate-field).
+//! ```
+//! # #[cfg(feature = "trace")]
+//! # {
+//! use opentelemetry::{global, trace::{self, Span, StatusCode, Tracer, TracerProvider}};
 //!
+//! fn may_error(rand: f32) {
+//!     if rand < 0.5 {
+//!         // Get the currently active span to record additional attributes,
+//!         // status, etc.
+//!         trace::get_active_span(|span| {
+//!             span.set_status(StatusCode::Error, "value too small".into());
+//!         });
+//!     }
+//! }
+//!
+//! // Get a tracer
+//! let tracer = global::tracer("my_tracer");
+//!
+//! // Create a span
+//! let span = tracer.start("parent_span");
+//!
+//! // Mark the span as active
+//! let active = trace::mark_span_as_active(span);
+//!
+//! // Any span created here will be a child of `parent_span`...
+//!
+//! // Drop the guard and the span will no longer be active
+//! drop(active)
+//! # }
+//! ```
+//!
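+//! Attributes and timestamped events can be recorded on the active span in the
+//! same way. The short sketch below uses made-up attribute and event names
+//! purely for illustration:
+//!
+//! ```
+//! # #[cfg(feature = "trace")]
+//! # {
+//! use opentelemetry::{trace::{self, Span}, KeyValue};
+//!
+//! // Record extra detail on whichever span is currently active.
+//! trace::get_active_span(|span| {
+//!     // A hypothetical attribute describing this unit of work.
+//!     span.set_attribute(KeyValue::new("cache.hit", false));
+//!     // A hypothetical timestamped event with its own attributes.
+//!     span.add_event("cache_miss".to_string(), vec![KeyValue::new("cache.key", "user:42")]);
+//! });
+//! # }
+//! ```
+//!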
+//! Additionally [`Tracer::with_span`] and [`Tracer::in_span`] can be used as shorthand to
+//! simplify managing the parent context.
+//!
+//! ```
+//! # #[cfg(feature = "trace")]
+//! # {
+//! use opentelemetry::{global, trace::Tracer};
+//!
+//! // Get a tracer
+//! let tracer = global::tracer("my_tracer");
+//!
+//! // Use `in_span` to create a new span and mark it as the parent, dropping it
+//! // at the end of the block.
+//! tracer.in_span("parent_span", |cx| {
+//!     // spans created here will be children of `parent_span`
+//! });
+//!
+//! // Use `with_span` to mark a span as active for a given period.
+//! let span = tracer.start("parent_span");
+//! tracer.with_span(span, |cx| {
+//!     // spans created here will be children of `parent_span`
+//! });
+//! # }
+//! ```
+//!
+//! #### Async active spans
+//!
+//! Async spans can be propagated with [`TraceContextExt`] and [`FutureExt`].
+//!
+//! ```
+//! # #[cfg(feature = "trace")]
+//! # {
+//! use opentelemetry::{Context, global, trace::{FutureExt, TraceContextExt, Tracer}};
+//!
+//! async fn some_work() { }
+//!
+//! // Get a tracer
+//! let tracer = global::tracer("my_tracer");
+//!
+//! // Start a span
+//! let span = tracer.start("my_span");
+//!
+//! // Perform some async work with this span as the currently active parent.
+//! some_work().with_context(Context::current_with_span(span));
+//! # }
+//! ```
+
 use ::futures::channel::{mpsc::TrySendError, oneshot::Canceled};
 use thiserror::Error;
diff --git a/opentelemetry/src/trace/span_context.rs b/opentelemetry/src/trace/span_context.rs
index 0cdded93af..bbbe58620f 100644
--- a/opentelemetry/src/trace/span_context.rs
+++ b/opentelemetry/src/trace/span_context.rs
@@ -7,7 +7,7 @@
 //! It contains two identifiers - a `TraceId` and a `SpanId` - along with a set of common
 //! `TraceFlags` and system-specific `TraceState` values.
 //!
-//! The spec can be viewed here: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-tracing.md#SpanContext
+//! The spec can be viewed here: <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-tracing.md#SpanContext>
 //!
 //! [w3c TraceContext specification]: https://www.w3.org/TR/trace-context/
 #[cfg(feature = "serialize")]
diff --git a/opentelemetry/src/trace/tracer.rs b/opentelemetry/src/trace/tracer.rs
index 259d54716b..d161b410b0 100644
--- a/opentelemetry/src/trace/tracer.rs
+++ b/opentelemetry/src/trace/tracer.rs
@@ -313,7 +313,7 @@ pub trait Tracer: fmt::Debug + 'static {
 /// `SpanBuilder` allows span attributes to be configured before the span
 /// has started.
 ///
-/// ```rust
+/// ```
 /// use opentelemetry::{
 ///     global,
 ///     trace::{TracerProvider, SpanBuilder, SpanKind, Tracer},