From 41cccbdf5a81d6b0cf9257e66e876638bcc49c60 Mon Sep 17 00:00:00 2001 From: Severin Neumann Date: Tue, 19 Dec 2023 14:41:19 +0100 Subject: [PATCH] Rework the python exporters page (#3515) Signed-off-by: svrnm Co-authored-by: Patrice Chalin Co-authored-by: Phillip Carter --- .../docs/instrumentation/python/exporters.md | 453 ++++++++++++------ static/refcache.json | 16 + 2 files changed, 312 insertions(+), 157 deletions(-) diff --git a/content/en/docs/instrumentation/python/exporters.md b/content/en/docs/instrumentation/python/exporters.md index 857f608026b1..c149d04fff91 100644 --- a/content/en/docs/instrumentation/python/exporters.md +++ b/content/en/docs/instrumentation/python/exporters.md @@ -17,19 +17,172 @@ learn how to setup exporters following the {{% /alert %}} -## Console exporter +## OTLP -The console exporter is useful for development and debugging tasks, and is the -simplest to set up. +### Collector Setup -### Trace +{{% alert title="Note" color="info" %}} + +If you have a OTLP collector or backend already set up, you can skip this +section and [setup the OTLP exporter dependencies](#otlp-dependencies) for your +application. + +{{% /alert %}} + +To try out and verify your OTLP exporters, you can run the collector in a docker +container that writes telemetry directly to the console. + +In an empty directory, create a file called `collector-config.yaml` with the +following content: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: +exporters: + debug: + verbosity: detailed +service: + pipelines: + traces: + receivers: [otlp] + exporters: [debug] + metrics: + receivers: [otlp] + exporters: [debug] + logs: + receivers: [otlp] + exporters: [debug] +``` + +Now run the collector in a docker container: + +```shell +docker run -p 4317:4317 -p 4318:4318 --rm -v $(pwd)/collector-config.yaml:/etc/otelcol/config.yaml otel/opentelemetry-collector +``` + +This collector is now able to accept telemetry via OTLP. 
Later you may want to
[configure the collector](/docs/collector/configuration) to send your telemetry
to your observability backend.

### Dependencies {#otlp-dependencies}

If you want to send telemetry data to an OTLP endpoint (like the
[OpenTelemetry Collector](#collector-setup), [Jaeger](#jaeger) or
[Prometheus](#prometheus)), you can choose between two different protocols to
transport your data:

- [HTTP/protobuf](https://pypi.org/project/opentelemetry-exporter-otlp-proto-http/)
- [gRPC](https://pypi.org/project/opentelemetry-exporter-otlp-proto-grpc/)

Start by installing the respective exporter packages as a dependency for your
project:

{{< tabpane text=true >}} {{% tab "HTTP/Proto" %}}

```shell
pip install opentelemetry-exporter-otlp-proto-http
```

{{% /tab %}} {{% tab gRPC %}}

```shell
pip install opentelemetry-exporter-otlp-proto-grpc
```

{{% /tab %}} {{< /tabpane >}}

### Usage

Next, configure the exporter to point at an OTLP endpoint in your code.
+ +{{< tabpane text=true >}} {{% tab "HTTP/Proto" %}} ```python +from opentelemetry.sdk.resources import SERVICE_NAME, Resource + from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter +from opentelemetry.sdk.trace.export import BatchSpanProcessor + +from opentelemetry import metrics +from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader + +# Service name is required for most backends +resource = Resource(attributes={ + SERVICE_NAME: "your-service-name" +}) + +traceProvider = TracerProvider(resource=resource) +processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="/v1/traces")) +traceProvider.add_span_processor(processor) +trace.set_tracer_provider(traceProvider) + +reader = PeriodicExportingMetricReader( + OTLPMetricExporter(endpoint="/v1/metrics") +) +meterProvider = MeterProvider(resource=resource, metric_readers=[reader]) +metrics.set_meter_provider(meterProvider) +``` + +{{% /tab %}} {{% tab gRPC %}} + +```python from opentelemetry.sdk.resources import SERVICE_NAME, Resource +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor + +from opentelemetry import metrics +from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader + +# Service name is required for most backends +resource = Resource(attributes={ + SERVICE_NAME: "your-service-name" +}) + +traceProvider = 
TracerProvider(resource=resource) +processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="your-endpoint-here")) +traceProvider.add_span_processor(processor) +trace.set_tracer_provider(traceProvider) + +reader = PeriodicExportingMetricReader( + OTLPMetricExporter(endpoint="localhost:5555") +) +meterProvider = MeterProvider(resource=resource, metric_readers=[reader]) +metrics.set_meter_provider(meterProvider) +``` + +{{% /tab %}} {{< /tabpane >}} + +## Console + +To debug your instrumentation or see the values locally in development, you can +use exporters writing telemetry data to the console (stdout). + +The `ConsoleSpanExporter` and `ConsoleMetricExporter` are included in the +`opentelemetry-sdk` package. + +```python +from opentelemetry.sdk.resources import SERVICE_NAME, Resource + +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter + # Service name is required for most backends, # and although it's not necessary for console export, # it's good to set service name anyways. @@ -37,23 +190,17 @@ resource = Resource(attributes={ SERVICE_NAME: "your-service-name" }) -provider = TracerProvider(resource=resource) +traceProvider = TracerProvider(resource=resource) processor = BatchSpanProcessor(ConsoleSpanExporter()) -provider.add_span_processor(processor) -trace.set_tracer_provider(provider) +traceProvider.add_span_processor(processor) +trace.set_tracer_provider(traceProvider) -# Merrily go about tracing! 
+reader = PeriodicExportingMetricReader(ConsoleMetricExporter()) +meterProvider = MeterProvider(resource=resource, metric_readers=[reader]) +metrics.set_meter_provider(meterProvider) ``` -### Metrics - -Use a [`PeriodicExportingMetricReader`][pemr] to periodically print metrics to -the console. `PeriodicExportingMetricReader` can be configured to export at a -different interval, change the -[temporality](/docs/specs/otel/metrics/data-model/#temporality) for each -instrument kind, or change the default aggregation for each instrument kind. - -#### Temporality Presets +{{% alert title="Note" color="info" %}} There are temporality presets for each instrumentation kind. These presets can be set with the environment variable @@ -99,63 +246,90 @@ Setting `OTEL_EXPORTER_METRICS_TEMPORALITY_PREFERENCE` to any other value than `CUMULATIVE`, `DELTA` or `LOWMEMORY` will log a warning and set this environment variable to `CUMULATIVE`. -```python -from opentelemetry import metrics -from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter -from opentelemetry.sdk.resources import SERVICE_NAME, Resource +{{% /alert %}} -# Service name is required for most backends, -# and although it's not necessary for console export, -# it's good to set service name anyways. -resource = Resource(attributes={ - SERVICE_NAME: "your-service-name" -}) +## Jaeger -reader = PeriodicExportingMetricReader(ConsoleMetricExporter()) -provider = MeterProvider(resource=resource, metric_readers=[reader]) -metrics.set_meter_provider(provider) +[Jaeger](https://www.jaegertracing.io/) natively supports OTLP to receive trace +data. 
You can run Jaeger in a docker container with the UI accessible on port
16686 and OTLP enabled on ports 4317 and 4318:

```shell
docker run --rm \
  -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \
  -p 16686:16686 \
  -p 4317:4317 \
  -p 4318:4318 \
  -p 9411:9411 \
  jaegertracing/all-in-one:latest
```

Now follow the instructions to set up the [OTLP exporters](#otlp-dependencies).

## Prometheus

To send your metric data to [Prometheus](https://prometheus.io/), you can either
[enable Prometheus' OTLP Receiver](https://prometheus.io/docs/prometheus/latest/feature_flags/#otlp-receiver)
and use the [OTLP exporter](#otlp), or you can use the `PrometheusExporter`.

### Backend Setup {#prometheus-setup}

{{% alert title="Note" color="info" %}}

If you have Prometheus or a Prometheus-compatible backend already set up, you
can skip this section and setup the [Prometheus](#prometheus-dependencies) or
[OTLP](#otlp-dependencies) exporter dependencies for your application.

{{% /alert %}}

You can run [Prometheus](https://prometheus.io/) in a docker container,
accessible on port `9090` by following these instructions:

Create a file called `prometheus.yml` with the following content:

```yaml
scrape_configs:
  - job_name: dice-service
    scrape_interval: 5s
    static_configs:
      - targets: [host.docker.internal:9464]
```

Run Prometheus in a docker container with the UI accessible on port `9090`:

```shell
docker run --rm -v ${PWD}/prometheus.yml:/prometheus/prometheus.yml -p 9090:9090 prom/prometheus --enable-feature=otlp-write-receiver
```

{{% alert title="Note" color="info" %}}

When using Prometheus' OTLP Receiver, make sure that you set the OTLP endpoint
for metrics in your application to `http://localhost:9090/api/v1/otlp`.

Not all docker environments support `host.docker.internal`. In some cases you
may need to replace `host.docker.internal` with `localhost` or the IP address of
your machine.
+ +{{% /alert %}} + +### Dependencies {#prometheus-dependencies} + +Install the +[exporter package](https://pypi.org/project/opentelemetry-exporter-prometheus/) +as a dependency for your application: + +```sh +pip install opentelemetry-exporter-prometheus ``` -### Metrics +Update your OpenTelemetry configuration to use the exporter and to send data to +your Prometheus backend: ```python +from prometheus_client import start_http_server + from opentelemetry import metrics -from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter +from opentelemetry.exporter.prometheus import PrometheusMetricReader from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader from opentelemetry.sdk.resources import SERVICE_NAME, Resource @@ -165,70 +339,66 @@ resource = Resource(attributes={ SERVICE_NAME: "your-service-name" }) -reader = PeriodicExportingMetricReader( - OTLPMetricExporter(endpoint="localhost:5555") -) +# Start Prometheus client +start_http_server(port=9464, addr="localhost") +# Initialize PrometheusMetricReader which pulls metrics from the SDK +# on-demand to respond to scrape requests +reader = PrometheusMetricReader() provider = MeterProvider(resource=resource, metric_readers=[reader]) metrics.set_meter_provider(provider) ``` -### Using HTTP +With the above you can access your metrics at . +Prometheus or an OpenTelemetry Collector with the Prometheus receiver can scrape +the metrics from this endpoint. 
-If you'd prefer to use [OTLP/HTTP](/docs/specs/otlp/#otlphttp) with the -binary-encoded protobuf format, you can install the package: +## Zipkin -```sh -pip install opentelemetry-exporter-otlp-proto-http -``` +### Backend Setup {#zipkin-setup} -Next, replace the import declarations with the following: +{{% alert title="Note" color="info" %}} -```python -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -``` +If you have Zipkin or a Zipkin-compatible backend already set up, you can skip +this section and setup the [Zipkin exporter dependencies](#zipkin-dependencies) +for your application. -Finally, update your exporter endpoint if you're specifying it in code: +{{% /alert %}} -```python -OTLPSpanExporter(endpoint="/v1/traces") +You can run [Zipkin](https://zipkin.io/) on in a Docker container by executing +the following command: + +```shell +docker run --rm -d -p 9411:9411 --name zipkin openzipkin/zipkin ``` -There is not currently an OTLP/HTTP metric exporter. +### Dependencies {#zipkin-dependencies} -## Jaeger +To send your trace data to [Zipkin](https://zipkin.io/), , you can choose +between two different protocols to transport your data: -[Jaeger](https://jaegertracing.io) natively supports OTLP. Follow the -instructions on -[setting up the OTLP exporter above](#otlp-endpoint-or-collector). 
You can then -run Jaeger in a docker container with the UI accessible on port 16686 and OTLP -enabled on ports 4317 and 4318: +- [HTTP/protobuf](https://pypi.org/project/opentelemetry-exporter-zipkin-proto-http/) +- [Thrift](https://pypi.org/project/opentelemetry-exporter-zipkin-json/) + +Install the exporter package as a dependency for your application: + +{{< tabpane text=true >}} {{% tab "HTTP/Proto" %}} ```shell -docker run --rm \ - -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \ - -p 16686:16686 \ - -p 4317:4317 \ - -p 4318:4318 \ - -p 9411:9411 \ - jaegertracing/all-in-one:latest +pip install opentelemetry-exporter-zipkin-proto-http ``` -## Zipkin - -If you are using [Zipkin](https://zipkin.io/) to visualize trace data, you'll -need to set it up first. This is how to run it in a docker container: +{{% /tab %}} {{% tab Thrift %}} -```sh -docker run --rm -d -p 9411:9411 --name zipkin openzipkin/zipkin +```shell +pip install opentelemetry-exporter-zipkin-json ``` -Next, install the Zipkin exporter package: +{{% /tab %}} {{< /tabpane >}} -```sh -pip install opentelemetry-exporter-zipkin-proto-http -``` +Update your OpenTelemetry configuration to use the exporter and to send data to +your Zipkin backend: -Then you can configure the exporter when initializing tracing: +{{< tabpane text=true >}} {{% tab "HTTP/Proto" %}} ```python from opentelemetry import trace @@ -247,80 +417,49 @@ provider = TracerProvider(resource=resource) processor = BatchSpanProcessor(zipkin_exporter) provider.add_span_processor(processor) trace.set_tracer_provider(provider) - -# merrily go about tracing! 
-``` - -### Using JSON - -If you'd prefer to use Thrift as the protocol, you can install the package: - -```sh -pip install opentelemetry-exporter-zipkin-json ``` -And replace the `ZipkinExporter` import declaration with the following: +{{% /tab %}} {{% tab Thrift %}} ```python +from opentelemetry import trace from opentelemetry.exporter.zipkin.json import ZipkinExporter -``` - -## Prometheus +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.sdk.resources import SERVICE_NAME, Resource -If you are using [Prometheus](https://prometheus.io/) to collect metrics data, -you'll need to set it up first. +resource = Resource(attributes={ + SERVICE_NAME: "your-service-name" +}) -First create a config file: +zipkin_exporter = ZipkinExporter(endpoint="http://localhost:9411/api/v2/spans") -```bash -cat > prometheus.yml <}} -```sh -docker run -d --rm \ - --network=host \ - -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ - prom/prometheus -``` +## Other available exporters -Next, install the Prometheus exporter package: +There are many other exporters available. For a list of available exporters, see +the [registry](/ecosystem/registry/?language=python&component=exporter). -```sh -pip install opentelemetry-exporter-prometheus -``` +Finally, you can also write your own exporter. For more information, see the +[SpanExporter Interface in the API documentation](https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.SpanExporter). -Then you can configure the exporter when initializing metrics: +## Batching spans -```python -from prometheus_client import start_http_server +For traces the OpenTelemetry SDK provides a set of default span processors, that +allow you to either emit spans one-by-one or batched. 
Using the +`BatchSpanProcessor` is recommended, but if you do not want to batch your spans, +you can use the `SimpleSpanProcessor` instead as follows: -from opentelemetry import metrics -from opentelemetry.exporter.prometheus import PrometheusMetricReader -from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader -from opentelemetry.sdk.resources import SERVICE_NAME, Resource - -# Service name is required for most backends -resource = Resource(attributes={ - SERVICE_NAME: "your-service-name" -}) +```python +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import SimpleSpanProcessor -# Start Prometheus client -start_http_server(port=8000, addr="localhost") -# Initialize PrometheusMetricReader which pulls metrics from the SDK -# on-demand to respond to scrape requests -reader = PrometheusMetricReader() -provider = MeterProvider(resource=resource, metric_readers=[reader]) -metrics.set_meter_provider(provider) +processor = SimpleSpanProcessor(OTLPSpanExporter(endpoint="your-endpoint-here")) ``` - -[pemr]: - https://opentelemetry-python.readthedocs.io/en/latest/sdk/metrics.export.html#opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader diff --git a/static/refcache.json b/static/refcache.json index fd1b65291ccc..3873cf0d87ec 100644 --- a/static/refcache.json +++ b/static/refcache.json @@ -4971,6 +4971,10 @@ "StatusCode": 200, "LastSeen": "2023-06-29T18:46:14.19804-04:00" }, + "https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.SpanExporter": { + "StatusCode": 200, + "LastSeen": "2023-11-08T16:22:53.779771+01:00" + }, "https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.html": { "StatusCode": 200, "LastSeen": "2023-06-29T18:46:03.591875-04:00" @@ -5391,6 +5395,18 @@ "StatusCode": 206, "LastSeen": "2023-06-30T11:43:42.332023-04:00" }, + 
"https://pypi.org/project/opentelemetry-exporter-prometheus/": { + "StatusCode": 206, + "LastSeen": "2023-11-08T16:22:52.2717+01:00" + }, + "https://pypi.org/project/opentelemetry-exporter-zipkin-json/": { + "StatusCode": 206, + "LastSeen": "2023-11-08T16:22:53.166698+01:00" + }, + "https://pypi.org/project/opentelemetry-exporter-zipkin-proto-http/": { + "StatusCode": 206, + "LastSeen": "2023-11-08T16:22:52.72657+01:00" + }, "https://qryn.metrico.in/#/support": { "StatusCode": 206, "LastSeen": "2023-10-17T15:10:07.18892+02:00"