diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2b4fbc08e997..31218c002afb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -16,11 +16,12 @@ * @open-telemetry/docs-approvers # content owners -content-modules/opamp-spec @open-telemetry/docs-approvers @open-telemetry/opamp-spec-approvers -content-modules/opentelemetry-proto @open-telemetry/docs-approvers @open-telemetry/specs-approvers -content-modules/opentelemetry-specification @open-telemetry/docs-approvers @open-telemetry/specs-approvers -content-modules/semantic-conventions @open-telemetry/docs-approvers @open-telemetry/specs-semconv-approvers -content/en/blog/ @open-telemetry/docs-approvers @open-telemetry/blog-approvers +content-modules/ @open-telemetry/docs-maintainers +content-modules/opamp-spec @open-telemetry/docs-maintainers @open-telemetry/opamp-spec-approvers +content-modules/opentelemetry-proto @open-telemetry/docs-maintainers @open-telemetry/specs-approvers +content-modules/opentelemetry-specification @open-telemetry/docs-maintainers @open-telemetry/specs-approvers +content-modules/semantic-conventions @open-telemetry/docs-maintainers @open-telemetry/specs-semconv-approvers +content/en/blog/ @open-telemetry/docs-maintainers content/en/community/end-user/ @open-telemetry/docs-approvers @open-telemetry/end-user-wg content/en/docs/collector @open-telemetry/docs-approvers @open-telemetry/collector-approvers content/en/docs/demo @open-telemetry/docs-approvers @open-telemetry/demo-approvers diff --git a/.github/workflows/auto-update-versions.yml b/.github/workflows/auto-update-versions.yml index 30410835fa56..9b4363908e6f 100644 --- a/.github/workflows/auto-update-versions.yml +++ b/.github/workflows/auto-update-versions.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Use CLA approved github bot run: | diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml index f640d0eb3365..84096c8b11e1 100644 --- a/.github/workflows/check-format.yml +++ b/.github/workflows/check-format.yml @@ -8,21 +8,21 @@ jobs: name: FILENAME check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: npm run check:filenames check-formatting: name: FILE FORMAT runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create NPM cache-hash input file run: | mkdir -p tmp jq '{devDependencies, dependencies, engines, gitHubActionCacheKey}' package.json > tmp/package-ci.json - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: .nvmrc cache: npm diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index ec0a4729510e..2f6630a2bb2c 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -8,7 +8,7 @@ jobs: name: BUILD and CHECK LINKS runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create NPM cache-hash input file run: | @@ -24,7 +24,7 @@ jobs: package.json > tmp/package-min.json cp tmp/package-min.json package.json - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: .nvmrc cache: npm @@ -46,7 +46,7 @@ jobs: needs: build-and-check-links runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/download-artifact@v3 with: { name: build-log-etc } - run: npm run diff:fail @@ -56,7 +56,7 @@ jobs: needs: build-and-check-links runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/download-artifact@v3 with: { name: build-log-etc } - run: cat tmp/build-log.txt diff --git a/.github/workflows/check-spelling.yml b/.github/workflows/check-spelling.yml index 21e46c66b30e..6f00491f9c76 100644 --- a/.github/workflows/check-spelling.yml +++ b/.github/workflows/check-spelling.yml @@ -8,7 +8,7 @@ jobs: name: SPELLING check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: streetsidesoftware/cspell-action@v2 with: # Files should be consistent with check:spelling files @@ -22,7 +22,7 @@ jobs: name: CSPELL:IGNORE check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: npm run fix:dict - name: Any changed files? run: | diff --git a/.github/workflows/check-text.yml b/.github/workflows/check-text.yml index 420ca649df3f..9516a2c4e602 100644 --- a/.github/workflows/check-text.yml +++ b/.github/workflows/check-text.yml @@ -8,7 +8,7 @@ jobs: name: TEXT linter runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Make sure that we only install the dependencies for textlint to speed up install - run: | mkdir -p tmp @@ -23,7 +23,7 @@ jobs: name: MARKDOWN linter runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create and use reduced-dependencies package.json run: | diff --git a/.github/workflows/pr-actions.yml b/.github/workflows/pr-actions.yml index 1b5392f1a9f0..1d70daba921f 100644 --- a/.github/workflows/pr-actions.yml +++ b/.github/workflows/pr-actions.yml @@ -20,6 +20,7 @@ jobs: contains(github.event.comment.body, '/fix:format') permissions: contents: write + pull-requests: write steps: - name: Context info @@ -27,9 +28,15 @@ jobs: echo $PR_NUM echo $COMMENT - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - run: gh pr checkout $PR_NUM + - name: Write start comment + run: | + gh pr comment $PR_NUM -b "You triggered fix:format action run at $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" + env: + GH_TOKEN: ${{ github.token }} + + - run: gh pr checkout $PR_NUM -b "pr-action-${RANDOM}" env: GH_TOKEN: ${{ github.token }} @@ -38,7 +45,7 @@ jobs: mkdir -p tmp jq '{devDependencies, dependencies, engines, gitHubActionCacheKey}' package.json > tmp/package-ci.json - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: .nvmrc cache: npm @@ -57,14 +64,27 @@ jobs: git add -A current_branch=$(git rev-parse --abbrev-ref HEAD) echo current_branch=$current_branch + # gh pr checkout sets some git configs that we can use to make sure + # we push to the right repo & to the right branch + remote_repo=$(git config --get branch.${current_branch}.remote) + echo remote_repo=$remote_repo + remote_branch=$(git config --get branch.${current_branch}.merge) + echo remote_branch=$remote_branch git commit -m 'Results from /fix:format' - git push origin $current_branch + git push ${remote_repo} HEAD:${remote_branch} else echo "No changes to commit" fi env: GH_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }} + - name: Report an error in the case of failure + if: ${{ failure() || cancelled() }} + run: | + gh pr comment $PR_NUM -b "fix:format run failed, please check $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID for details" + env: + GH_TOKEN: ${{ github.token }} + fix-refcache: name: /fix:refcache runs-on: ubuntu-latest @@ -84,9 +104,17 @@ jobs: echo $PR_NUM 
echo $COMMENT - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - run: gh pr checkout $PR_NUM + - name: Write start comment + run: | + gh pr comment $PR_NUM -b "You triggered fix:refcache action run at $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" + env: + GH_TOKEN: ${{ github.token }} + + # By providing a branch name the checkout will not break if a branch with the + # same name exists already upstream (e.g. patch-X) + - run: gh pr checkout $PR_NUM -b "pr-action-${RANDOM}" env: GH_TOKEN: ${{ github.token }} @@ -95,7 +123,7 @@ jobs: mkdir -p tmp jq '{devDependencies, dependencies, engines, gitHubActionCacheKey}' package.json > tmp/package-ci.json - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: .nvmrc cache: npm @@ -115,10 +143,23 @@ jobs: git add -A current_branch=$(git rev-parse --abbrev-ref HEAD) echo current_branch=$current_branch - git commit -m 'Results from /fix:recache' - git push origin $current_branch + # gh pr checkout sets some git configs that we can use to make sure + # we push to the right repo & to the right branch + remote_repo=$(git config --get branch.${current_branch}.remote) + echo remote_repo=$remote_repo + remote_branch=$(git config --get branch.${current_branch}.merge) + echo remote_branch=$remote_branch + git commit -m 'Results from /fix:refcache' + git push ${remote_repo} HEAD:${remote_branch} else echo "No changes to commit" fi env: GH_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }} + + - name: Report an error in the case of failure + if: ${{ failure() || cancelled() }} + run: | + gh pr comment $PR_NUM -b "fix:recache run failed, please check $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID for details" + env: + GH_TOKEN: ${{ github.token }} diff --git a/.htmltest.yml b/.htmltest.yml index d29cf03b3de6..e82fd4381d0a 100644 --- a/.htmltest.yml +++ b/.htmltest.yml @@ -1,9 +1,11 @@ -CacheExpires: 4400h # ~ 6 months +CacheExpires: 6000h # ~ 8.2 months DirectoryPath: public IgnoreDirectoryMissingTrailingSlash: true IgnoreAltMissing: true IgnoreCanonicalBrokenLinks: false CheckMailto: false +IgnoreDirs: + - ^blog/(\d+/)?page/\d+ IgnoreInternalURLs: # list of paths IgnoreURLs: # list of regexs of paths or URLs to be ignored - ^/docs/instrumentation/\w+/(api|examples)/$ @@ -18,6 +20,9 @@ IgnoreURLs: # list of regexs of paths or URLs to be ignored - ^https?://127\.0\.0\.1\b - ^https?://(otel-demo|traefik)\.localhost + # OpAMP spec: + - ^https://pdf.sciencedirectassets.com/280203/1-s2.0-S1877050919X0006X/1-s2.0-S1877050919303576/main.pdf\? + # Sites that deny access, always yielding 403 Forbidden (unless mentioned otherwise) - ^https://(www\.)?linkedin\.com\b # 999 Request Denied - ^https://(www\.)?mvnrepository\.com @@ -41,13 +46,5 @@ IgnoreURLs: # list of regexs of paths or URLs to be ignored - ^https://crates\.io/crates # TODO: drop after fix to https://github.com/micrometer-metrics/micrometer-docs/issues/239 - ^https://micrometer\.io/docs - # TODO: drop after fix to https://github.com/google/docsy/issues/1337 - - ^https://opentelemetry\.io/. 
- # TODO: drop after fix to https://github.com/open-telemetry/opentelemetry.io/issues/2354 - - ^https://open-telemetry\.github\.io/opentelemetry-python/benchmarks/ - # TODO: remove after merge of https://github.com/open-telemetry/opentelemetry.io/pull/2594 - - ^https://elastic.co/blog/ecs-elastic-common-schema-otel-opentelemetry-announcement - # TODO: remove after OpAMP spec is fixed: https://github.com/open-telemetry/opamp-spec/issues/148 - - ^https://example.com:4318/v1/metrics - - ^https://pdf.sciencedirectassets.com/280203/1-s2.0-S1877050919X0006X - - ^https://research.fb.com/wp-content/uploads/2016/11/holistic-configuration-management-at-facebook.pdf + # TODO: ensure .json isn't set as an alternate in ecosystem/registry/index.html and then drop: + - ^https://opentelemetry.io/ecosystem/registry/index.json diff --git a/.textlintrc.yml b/.textlintrc.yml index b4220053d365..5d93741e5cb3 100644 --- a/.textlintrc.yml +++ b/.textlintrc.yml @@ -14,7 +14,11 @@ filters: # Hugo template syntax: - /{{.*?}}/ - /{{%.*?%}}/ - # Other + # Custom header anchors in markdown headings: + - /{#.*?}/ + # src attribute in figure Hugo template: + - /src=".*?"/ + # Other: - // # Raw URLs rules: terminology: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ee6cbf54c21a..6802c7dabc37 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -76,7 +76,16 @@ For small changes to a single file, you can edit directly in GitHub by clicking **Edit this file** button. After forking the repository, follow the instructions in [Editing files][]. -For everything else, follow the +However, formatting may still be needed, like reducing line lengths in the +edited file. The options for fixing formatting are: + +- Checking out the project and running the CLI scripts mentioned in + [Submitting a change](#submitting-a-change). +- Commenting `/fix:format` on your pull request to trigger an automated script. + This requires a unique branch name, which can be edited under _View all + branches_ in your fork. + +For larger fixes, follow the [instructions to setup a development environment](#development) below. ### PR Guidelines @@ -212,8 +221,8 @@ The website is built from the following content: ### Submitting a change -Before submitting a to the repository, run the following command and address any -reported issues. Also commit any files changed by the `fix` script: +Before submitting a change to the repository, run the following command and +address any reported issues. Also commit any files changed by the `fix` script: ```sh npm run test-and-fix diff --git a/README.md b/README.md index 451b347436d4..ae4de4622615 100644 --- a/README.md +++ b/README.md @@ -74,11 +74,29 @@ schedule. Meeting notes are available as a public [Google doc][]. If you have trouble accessing the doc, please get in touch on [Slack][]. 
-Roles: +Here is a list of community roles with current and previous members: - Approvers: [@open-telemetry/docs-approvers][] + + - [Fabrizio Ferri-Benedetti](https://github.com/theletterf), Splunk + - [Michael Hausenblas](https://github.com/mhausenblas), Amazon + - Maintainers: [@open-telemetry/docs-maintainers][] -- Blog approvers: [@open-telemetry/blog-approvers][] + + - [Austin Parker](https://github.com/austinlparker), Honeycomb + - [Patrice Chalin](https://github.com/chalin), CNCF + - [Phillip Carter](https://github.com/cartermp), Honeycomb + - [Severin Neumann](https://github.com/svrnm), Cisco + +- Emeritus approvers: + + - [Paul Bruce](https://github.com/paulsbruce) + +- Emeritus maintainers: + + - [Steve Flanders](https://github.com/flands) + - [Morgan McLean](https://github.com/mtwo) + - [jparsana](https://github.com/jparsana) Learn more about roles in the [community repository][]. Thanks to [all who have already contributed][contributors]! @@ -91,8 +109,6 @@ already contributed][contributors]! [adding to the registry]: https://opentelemetry.io/ecosystem/registry/adding/ [let us know]: https://github.com/open-telemetry/opentelemetry.io/issues/new/choose -[@open-telemetry/blog-approvers]: - https://github.com/orgs/open-telemetry/teams/blog-approvers [@open-telemetry/docs-approvers]: https://github.com/orgs/open-telemetry/teams/docs-approvers [@open-telemetry/docs-maintainers]: diff --git a/archetypes/blog.md b/archetypes/blog.md index 91999afca973..3fbeb86a57d3 100644 --- a/archetypes/blog.md +++ b/archetypes/blog.md @@ -17,7 +17,8 @@ With contributions from secondary-author-name-1, ..., and secondary-author-n. ## Top-level heading -Top-level headings start at **level 2**, as shown above. +Top-level headings start at **level 2**. This means, that your post should not +include `# headings` for top-level headings but `## headings` instead. ## Paragraphs @@ -44,3 +45,6 @@ npm run format ``` Happy writing! + +**Note:** If you view this page with the GitHub file viewer, you can safely +ignore the `Error in user YAML` at the top of this page. 
diff --git a/assets/scss/_styles_project.scss b/assets/scss/_styles_project.scss
index 228c908bf1c6..a5744e664c08 100644
--- a/assets/scss/_styles_project.scss
+++ b/assets/scss/_styles_project.scss
@@ -275,14 +275,6 @@ body.td-page--draft .td-content {
   }
 }
 
-// TODO(@chalin): upstream
-.tab-body {
-  > .highlight:only-child {
-    margin: -1.5rem;
-    max-width: calc(100% + 3rem);
-  }
-}
-
 details {
   margin-bottom: $paragraph-margin-bottom;
 }
diff --git a/content-modules/opamp-spec b/content-modules/opamp-spec
index 7718250cb5d3..36c8e0a2eba9 160000
--- a/content-modules/opamp-spec
+++ b/content-modules/opamp-spec
@@ -1 +1 @@
-Subproject commit 7718250cb5d3982a366c06fb151d67b57048c9c7
+Subproject commit 36c8e0a2eba98b939934632accb8e8253b4a0ea7
diff --git a/content-modules/semantic-conventions b/content-modules/semantic-conventions
index da1dbb567f0f..73720582560b 160000
--- a/content-modules/semantic-conventions
+++ b/content-modules/semantic-conventions
@@ -1 +1 @@
-Subproject commit da1dbb567f0f0c866dde79a7564a58ab8019b1cf
+Subproject commit 73720582560b5a020df8aee90504fcb411f62713
diff --git a/content/en/blog/2023/logs-collection/index.md b/content/en/blog/2023/logs-collection/index.md
new file mode 100644
index 000000000000..2b538f48cebd
--- /dev/null
+++ b/content/en/blog/2023/logs-collection/index.md
@@ -0,0 +1,481 @@
+---
+title: Collecting Logs with OpenTelemetry Python
+linkTitle: Python Logs Collection
+date: 2023-11-21
+author: '[Michael Hausenblas](https://github.com/mhausenblas) (AWS)'
+# prettier-ignore
+cSpell:ignore: asctime Chehab dataprepper exgru fileconsumer filelog Grogu grogu hossko Houssam levelname logfile otelbin Padawan Prepper svrnm WORKDIR yoda
+---
+
+In the following, we will walk through how to do
+[logs collection](/docs/concepts/signals/logs/) with OpenTelemetry (OTel). To
+keep things simple, we will use Python as the demonstration programming
+language. Note, however, that at the time of writing, logs support in Python is
+still in its early days, so things might need some updating.
+
+We will show the evolution from using print statements for logging (_Baby
+Grogu_ level), to logging to a file that is read by the
+[OpenTelemetry Collector](/docs/collector/) (_Expert Grogu_ level), to using
+the OTel logs bridge API to ingest logs in [OTLP](/docs/specs/otlp/) format
+directly into the Collector (_Yoda_ level).
+
+If you want to follow along, you need Docker installed. First off, go ahead and
+`git clone https://github.com/mhausenblas/ref.otel.help.git`, then change into
+the `how-to/logs-collection/` directory.
+
+## Baby Grogu level
+
+We start our journey with Baby Grogu, an alias to protect the innocent ;) They
+are a junior developer who is somewhat familiar with Python but doesn't know or
+care about telemetry, or more precisely, about logging. One day, Baby Grogu is
+asked to write a "Practice The Telemetry" piece of code, including catching bad
+input. What will the code look like, and how will Baby Grogu communicate
+progress in the code execution and potential error cases to the outside world?
+
+To get started, first change into the [baby-grogu/][repo-baby-grogu] directory.
+
+We're using Baby Grogu's Python code in `baby-grogu/main.py` as an example,
+with the interesting part located in the `practice()` function:
+
+```python
+start_time = time.time()
+try:
+    how_long_int = int(how_long)
+    print(f"Starting to practice The Telemetry for {how_long_int} second(s)")
+    while time.time() - start_time < how_long_int:
+        next_char = random.choice(string.punctuation)
+        print(next_char, end="", flush=True)
+        time.sleep(0.5)
+    print("\nDone practicing")
+except ValueError as ve:
+    print(f"I need an integer value for the time to practice: {ve}")
+    return False
+except Exception as e:
+    print(f"An unexpected error occurred: {e}")
+    return False
+return True
+```
+
+The above Python code doesn't really do anything useful; it just prints out
+random punctuation for the specified time, which represents the "practicing".
+However, do notice the different semantics of the `print()` calls Baby Grogu is
+using here.
+
+For example, when they say `print(next_char, end="", flush=True)` they're
+actually performing work, whereas when they write `print("\nDone practicing")`
+that's an informational message that the work is completed. This would be a
+great candidate for a log message!
+
+The same is true for
+`print(f"I need an integer value for the time to practice: {ve}")`, which
+really is Baby Grogu communicating that an error has occurred.
+
+To execute the code, you can either run it directly with `python3 main.py 3`
+(Python 3.11 required) to have Baby Grogu practice for 3 seconds, or you can
+use a containerized version.
+
+For the containerized version, we're using the following `Dockerfile`:
+
+```docker
+FROM python:3.11
+WORKDIR /usr/src/app
+COPY . .
+```
+
+We use the above `Dockerfile` in the context of the following Docker Compose
+file, `docker-compose.yaml`:
+
+```yaml
+version: '3'
+services:
+  baby-grogu:
+    build: .
+    command: python main.py 3
+    volumes:
+      - .:/usr/src/app
+```
+
+At this point you can enjoy Baby Grogu's efforts by running
+`docker compose -f docker-compose.yaml up`; you should see output akin to the
+following (edited to focus on the most important bits):
+
+```shell
+baby-grogu-baby-grogu-1 | Starting to practice The Telemetry for 2 second(s)
+baby-grogu-baby-grogu-1 | /)||
+baby-grogu-baby-grogu-1 | Done practicing
+baby-grogu-baby-grogu-1 | Practicing The Telemetry completed: True
+```
+
+OK, Baby Grogu did a good job; now it's time to rest. Get up, drink a bit of
+water, and when you come back with a fresh mind, let's up the game and use
+OTel!
+
+## Expert Grogu level
+
+Over time, Baby Grogu has learned about observability and telemetry
+specifically. They have advanced to Expert Grogu level. How? Glad you asked,
+let me show you.
+
+First, change into the [expert-grogu/][repo-expert-grogu] directory.
+
+In this scenario, Expert Grogu is logging to a file (in JSON format) from their
+Python app. Then, they are using the OpenTelemetry Collector to read that very
+log file, parse the log records using the [filelog receiver][filelog], and
+finally output them to `stdout` using the [debug exporter][debug]. Makes sense?
+Let's see it in action …
+
+Overall, we have the following setup:
+
+```mermaid
+flowchart LR
+    app["python main.py"] --> logfile["exgru.log"]
+    logfile["exgru.log"] --> otelcol["OTel collector"]
+    otelcol["OTel collector"] --> stdout["stdout"]
+```
+
+Let's first have a look at what Expert Grogu is doing in terms of logging (in
+`expert-grogu/main.py`, in the `practice()` function):
+
+```python
+start_time = time.time()
+try:
+    how_long_int = int(how_long)
+    logger.info("Starting to practice The Telemetry for %i second(s)", how_long_int)
+    while time.time() - start_time < how_long_int:
+        next_char = random.choice(string.punctuation)
+        print(next_char, end="", flush=True)
+        time.sleep(0.5)
+    logger.info("Done practicing")
+except ValueError as ve:
+    logger.error("I need an integer value for the time to practice: %s", ve)
+    return False
+except Exception as e:
+    logger.error("An unexpected error occurred: %s", e)
+    return False
+return True
+```
+
+In the above function, we see Expert Grogu using `logger.xxx()` functions to
+communicate status and progress as well as error conditions, such as the user
+providing a wrong input value for the time to practice (for example,
+`python main.py ABC` rather than `python main.py 5`, since the former can't be
+parsed into an integer).
+
+We are using the following `Dockerfile` (installing the one dependency we have,
+`python-json-logger==2.0.7`):
+
+```docker
+FROM python:3.11
+WORKDIR /usr/src/app
+COPY requirements.txt requirements.txt
+RUN pip3 install --no-cache-dir -r requirements.txt
+COPY . .
+```
+
+With the following OTel Collector config (visualize it via
+[OTelBin][otelbin-expert-grogu]):
+
+```yaml
+receivers:
+  filelog:
+    include: [/usr/src/app/*.log]
+    operators:
+      - type: json_parser
+        timestamp:
+          parse_from: attributes.asctime
+          layout: '%Y-%m-%dT%H:%M:%S'
+        severity:
+          parse_from: attributes.levelname
+exporters:
+  debug:
+    verbosity: detailed
+service:
+  pipelines:
+    logs:
+      receivers: [filelog]
+      exporters: [debug]
+```
+
+The following Docker Compose file brings all of the above together:
+
+```yaml
+version: '3'
+services:
+  collector:
+    image: otel/opentelemetry-collector-contrib:latest
+    volumes:
+      - ./otel-config.yaml:/etc/otelcol-contrib/config.yaml
+      - ./:/usr/src/app
+    command: ['--config=/etc/otelcol-contrib/config.yaml']
+    ports:
+      - '4317:4317'
+  baby-grogu:
+    build: .
+    command: python main.py 10
+    volumes:
+      - .:/usr/src/app
+```
+
+You can run it with `docker compose -f docker-compose.yaml up`, and you should
+see something like:
+
+```log +expert-grogu-collector-1 | 2023-11-15T17:21:32.811Z info service@v0.88.0/telemetry.go:84 Setting up own telemetry... +expert-grogu-collector-1 | 2023-11-15T17:21:32.812Z info service@v0.88.0/telemetry.go:201 Serving Prometheus metrics {"address": ":8888", "level": "Basic"} +expert-grogu-collector-1 | 2023-11-15T17:21:32.812Z info exporter@v0.88.0/exporter.go:275 Deprecated component. Will be removed in future releases. {"kind": "exporter", "data_type": "logs", "name": "logging"} +expert-grogu-collector-1 | 2023-11-15T17:21:32.812Z info service@v0.88.0/service.go:143 Starting otelcol-contrib... {"Version": "0.88.0", "NumCPU": 4} +expert-grogu-collector-1 | 2023-11-15T17:21:32.812Z info extensions/extensions.go:33 Starting extensions...
+expert-grogu-collector-1 | 2023-11-15T17:21:32.812Z info adapter/receiver.go:45 Starting stanza receiver {"kind": "receiver", "name": "filelog", "data_type": "logs"} +expert-grogu-collector-1 | 2023-11-15T17:21:32.813Z info service@v0.88.0/service.go:169 Everything is ready. Begin running and processing data. +expert-grogu-collector-1 | 2023-11-15T17:21:33.014Z info fileconsumer/file.go:182 Started watching file {"kind": "receiver", "name": "filelog", "data_type": "logs", "component": "fileconsumer", "path": "/usr/src/app/exgru.log"} +expert-grogu-collector-1 | 2023-11-15T17:21:33.113Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 1, "log records": 4} +expert-grogu-collector-1 | 2023-11-15T17:21:33.113Z info ResourceLog #0 +expert-grogu-collector-1 | Resource SchemaURL: +expert-grogu-collector-1 | ScopeLogs #0 +expert-grogu-collector-1 | ScopeLogs SchemaURL: +expert-grogu-collector-1 | InstrumentationScope +expert-grogu-collector-1 | LogRecord #0 +expert-grogu-collector-1 | ObservedTimestamp: 2023-11-15 17:21:33.01473246 +0000 UTC +expert-grogu-collector-1 | Timestamp: 2023-11-15 17:16:58 +0000 UTC +expert-grogu-collector-1 | SeverityText: INFO +expert-grogu-collector-1 | SeverityNumber: Info(9) +expert-grogu-collector-1 | Body: Str({"asctime": "2023-11-15T17:16:58", "levelname": "INFO", "message": "Starting to practice The Telemetry for 10 second(s)", "taskName": null}) +expert-grogu-collector-1 | Attributes: +expert-grogu-collector-1 | -> log.file.name: Str(exgru.log) +expert-grogu-collector-1 | -> asctime: Str(2023-11-15T17:16:58) +expert-grogu-collector-1 | -> levelname: Str(INFO) +expert-grogu-collector-1 | -> message: Str(Starting to practice The Telemetry for 10 second(s)) +expert-grogu-collector-1 | -> taskName: Str() +expert-grogu-collector-1 | Trace ID: +expert-grogu-collector-1 | Span ID: +expert-grogu-collector-1 | Flags: 0 +expert-grogu-collector-1 | LogRecord #1 +expert-grogu-collector-1 | ObservedTimestamp: 2023-11-15 17:21:33.014871669 +0000 UTC +expert-grogu-collector-1 | Timestamp: 2023-11-15 17:17:08 +0000 UTC +expert-grogu-collector-1 | SeverityText: INFO +expert-grogu-collector-1 | SeverityNumber: Info(9) +expert-grogu-collector-1 | Body: Str({"asctime": "2023-11-15T17:17:08", "levelname": "INFO", "message": "Done practicing", "taskName": null}) +expert-grogu-collector-1 | Attributes: +expert-grogu-collector-1 | -> log.file.name: Str(exgru.log) +expert-grogu-collector-1 | -> asctime: Str(2023-11-15T17:17:08) +expert-grogu-collector-1 | -> levelname: Str(INFO) +expert-grogu-collector-1 | -> message: Str(Done practicing) +expert-grogu-collector-1 | -> taskName: Str() +expert-grogu-collector-1 | Trace ID: +expert-grogu-collector-1 | Span ID: +expert-grogu-collector-1 | Flags: 0 +expert-grogu-collector-1 | LogRecord #2 +expert-grogu-collector-1 | ObservedTimestamp: 2023-11-15 17:21:33.01487521 +0000 UTC +expert-grogu-collector-1 | Timestamp: 2023-11-15 17:17:08 +0000 UTC +expert-grogu-collector-1 | SeverityText: INFO +expert-grogu-collector-1 | SeverityNumber: Info(9) +expert-grogu-collector-1 | Body: Str({"asctime": "2023-11-15T17:17:08", "levelname": "INFO", "message": "Practicing The Telemetry completed: True", "taskName": null}) +expert-grogu-collector-1 | Attributes: +expert-grogu-collector-1 | -> message: Str(Practicing The Telemetry completed: True) +expert-grogu-collector-1 | -> taskName: Str() +expert-grogu-collector-1 | -> asctime: Str(2023-11-15T17:17:08) +expert-grogu-collector-1 | -> log.file.name: 
Str(exgru.log) +expert-grogu-collector-1 | -> levelname: Str(INFO) +expert-grogu-collector-1 | Trace ID: +expert-grogu-collector-1 | Span ID: +expert-grogu-collector-1 | Flags: 0 +expert-grogu-collector-1 | LogRecord #3 +expert-grogu-collector-1 | ObservedTimestamp: 2023-11-15 17:21:33.01487771 +0000 UTC +expert-grogu-collector-1 | Timestamp: 2023-11-15 17:21:32 +0000 UTC +expert-grogu-collector-1 | SeverityText: INFO +expert-grogu-collector-1 | SeverityNumber: Info(9) +expert-grogu-collector-1 | Body: Str({"asctime": "2023-11-15T17:21:32", "levelname": "INFO", "message": "Starting to practice The Telemetry for 10 second(s)", "taskName": null}) +expert-grogu-collector-1 | Attributes: +expert-grogu-collector-1 | -> log.file.name: Str(exgru.log) +expert-grogu-collector-1 | -> asctime: Str(2023-11-15T17:21:32) +expert-grogu-collector-1 | -> levelname: Str(INFO) +expert-grogu-collector-1 | -> message: Str(Starting to practice The Telemetry for 10 second(s)) +expert-grogu-collector-1 | -> taskName: Str() +expert-grogu-collector-1 | Trace ID: +expert-grogu-collector-1 | Span ID: +expert-grogu-collector-1 | Flags: 0 +``` + +## Yoda level + +Now we're switching gears and look over Yoda's shoulders, a Telemetry Master. + +First, change into the [yoda/][repo-yoda] directory. + +In this scenario we see Yoda using the OTel logs bridge API in the Python app to +directly ingest logs, in [OpenTelemetry Protocol][otlp] (OTLP) format, into the +OTel Collector. This is both faster and more reliable than first logging to a +file and have the collector read it off of it! + +Overall, we have the following setup Yoda is using: + +```mermaid +flowchart LR + app["python main.py"]-- OTLP --> otelcol["OTel collector"] + otelcol["OTel collector"] --> stdout["stdout"] +``` + +With the following OTel collector config (visualize via +[OTelBin][otelbin-yoda]): + +```yaml +receivers: + otlp: + protocols: + grpc: +exporters: + debug: + verbosity: detailed +service: + pipelines: + logs: + receivers: [otlp] + exporters: [debug] +``` + +Now run Yoda's setup with `docker compose -f docker-compose.yaml` and you should +see something akin to below: + +```shell +yoda-collector-1 | 2023-11-15T16:54:22.545Z info service@v0.88.0/telemetry.go:84 Setting up own telemetry... +yoda-collector-1 | 2023-11-15T16:54:22.546Z info service@v0.88.0/telemetry.go:201 Serving Prometheus metrics {"address": ":8888", "level": "Basic"} +yoda-collector-1 | 2023-11-15T16:54:22.546Z info exporter@v0.88.0/exporter.go:275 Deprecated component. Will be removed in future releases. {"kind": "exporter", "data_type": "logs", "name": "logging"} +yoda-collector-1 | 2023-11-15T16:54:22.547Z info service@v0.88.0/service.go:143 Starting otelcol-contrib... {"Version": "0.88.0", "NumCPU": 4} +yoda-collector-1 | 2023-11-15T16:54:22.547Z info extensions/extensions.go:33 Starting extensions... 
+yoda-collector-1 | 2023-11-15T16:54:22.547Z warn internal@v0.88.0/warning.go:40 Using the 0.0.0.0 address exposes this server to every network interface, which may facilitate Denial of Service attacks {"kind": "receiver", "name": "otlp", "data_type": "logs", "documentation": "https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks"} +yoda-collector-1 | 2023-11-15T16:54:22.549Z info otlpreceiver@v0.88.0/otlp.go:83 Starting GRPC server {"kind": "receiver", "name": "otlp", "data_type": "logs", "endpoint": "0.0.0.0:4317"} +yoda-collector-1 | 2023-11-15T16:54:22.550Z info service@v0.88.0/service.go:169 Everything is ready. Begin running and processing data. +yoda-collector-1 | 2023-11-15T16:54:27.667Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 1, "log records": 1} +yoda-collector-1 | 2023-11-15T16:54:27.668Z info ResourceLog #0 +yoda-collector-1 | Resource SchemaURL: +yoda-collector-1 | Resource attributes: +yoda-collector-1 | -> telemetry.sdk.language: Str(python) +yoda-collector-1 | -> telemetry.sdk.name: Str(opentelemetry) +yoda-collector-1 | -> telemetry.sdk.version: Str(1.21.0) +yoda-collector-1 | -> service.name: Str(train-the-telemetry) +yoda-collector-1 | -> service.instance.id: Str(33992a23112e) +yoda-collector-1 | ScopeLogs #0 +yoda-collector-1 | ScopeLogs SchemaURL: +yoda-collector-1 | InstrumentationScope opentelemetry.sdk._logs._internal +yoda-collector-1 | LogRecord #0 +yoda-collector-1 | ObservedTimestamp: 1970-01-01 00:00:00 +0000 UTC +yoda-collector-1 | Timestamp: 2023-11-15 16:54:22.651675136 +0000 UTC +yoda-collector-1 | SeverityText: INFO +yoda-collector-1 | SeverityNumber: Info(9) +yoda-collector-1 | Body: Str(Starting to practice The Telemetry for 10 second(s)) +yoda-collector-1 | Trace ID: +yoda-collector-1 | Span ID: +yoda-collector-1 | Flags: 0 +yoda-collector-1 | {"kind": "exporter", "data_type": "logs", "name": "logging"} +yoda-collector-1 | 2023-11-15T16:54:32.715Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 1, "log records": 2} +yoda-collector-1 | 2023-11-15T16:54:32.716Z info ResourceLog #0 +yoda-collector-1 | Resource SchemaURL: +yoda-collector-1 | Resource attributes: +yoda-collector-1 | -> telemetry.sdk.language: Str(python) +yoda-collector-1 | -> telemetry.sdk.name: Str(opentelemetry) +yoda-collector-1 | -> telemetry.sdk.version: Str(1.21.0) +yoda-collector-1 | -> service.name: Str(train-the-telemetry) +yoda-collector-1 | -> service.instance.id: Str(33992a23112e) +yoda-collector-1 | ScopeLogs #0 +yoda-collector-1 | ScopeLogs SchemaURL: +yoda-collector-1 | InstrumentationScope opentelemetry.sdk._logs._internal +yoda-collector-1 | LogRecord #0 +yoda-collector-1 | ObservedTimestamp: 1970-01-01 00:00:00 +0000 UTC +yoda-collector-1 | Timestamp: 2023-11-15 16:54:32.713701888 +0000 UTC +yoda-collector-1 | SeverityText: INFO +yoda-collector-1 | SeverityNumber: Info(9) +yoda-collector-1 | Body: Str(Done practicing) +yoda-collector-1 | Trace ID: +yoda-collector-1 | Span ID: +yoda-collector-1 | Flags: 0 +yoda-collector-1 | LogRecord #1 +yoda-collector-1 | ObservedTimestamp: 1970-01-01 00:00:00 +0000 UTC +yoda-collector-1 | Timestamp: 2023-11-15 16:54:32.714062336 +0000 UTC +yoda-collector-1 | SeverityText: INFO +yoda-collector-1 | SeverityNumber: Info(9) +yoda-collector-1 | Body: Str(Practicing The Telemetry completed: True) +yoda-collector-1 | Trace ID: +yoda-collector-1 | Span 
ID: +yoda-collector-1 | Flags: 0 +yoda-collector-1 | {"kind": "exporter", "data_type": "logs", "name": "logging"} +yoda-baby-grogu-1 | =`;*'+.|,+?):(*-<}~} +``` + +Fun, huh? You can play around with Yoda's source code to add more contextual +information and add processors to manipulate the log records as they pass the +collector, now. + +May _The Telemetry_ be with you, young Padawan! + +## What's next? + +Now that you're familiar with _The Telemetry_ and its good practices, you could +extend Yoda's code to do the following: + +1. Add more context. For example, try to use + [OTel resource attributes](/docs/concepts/resources/) and the + [semantic conventions](/docs/concepts/semantic-conventions/) to make the + context of the execution more explicit. +1. Enrich the logs in the OTel Collector or filter certain severity levels, + using processors such as the + [transform or attributes processors](/docs/collector/transforming-telemetry/). +1. Add [tracing](/docs/concepts/signals/traces/) support by emitting spans, + where it makes sense. +1. Add an Observability backend such as OpenSearch (along with [Data + Prepper][dataprepper]) to the setup, allowing to ingest spans and logs in + OTLP format. +1. Once you have traces and logs ingested in a backend, try to correlate these + two telemetry signal types in the backend along with a frontend such as + Grafana. +1. Use [Automatic Instrumentation](/docs/concepts/instrumentation/automatic/) to + further enrich telemetry. + +The community is currently working on the [Events API +Interface][otel-logs-events] which is a good place to continue your research and +maybe provide feedback? + +## Kudos and References + +Kudos go out to [Severin Neumann][svrnm] and [Houssam Chehab][hossko] who both +were very patient with me and pivotal concerning making Yoda level work, I owe +you! 
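
If you would like to experiment with the Yoda-level approach without cloning the companion repository, the following is a minimal sketch of how the Python standard library `logging` module can be bridged to an OTLP exporter. It is modeled on the OpenTelemetry Python SDK's logs example rather than taken from Yoda's code; it assumes the `opentelemetry-sdk` and `opentelemetry-exporter-otlp` packages are installed, the Collector endpoint is an assumption based on the Compose setup above, and since the logs bridge is still experimental, the `_logs` import paths may change in newer SDK releases.

```python
import logging

from opentelemetry._logs import set_logger_provider
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource

# Set up a logger provider that ships log records to the Collector via OTLP/gRPC.
logger_provider = LoggerProvider(
    resource=Resource.create({"service.name": "train-the-telemetry"})
)
set_logger_provider(logger_provider)
logger_provider.add_log_record_processor(
    BatchLogRecordProcessor(OTLPLogExporter(endpoint="collector:4317", insecure=True))
)

# Bridge the standard library logger to OpenTelemetry.
logging.getLogger().addHandler(
    LoggingHandler(level=logging.INFO, logger_provider=logger_provider)
)

logger = logging.getLogger(__name__)
logger.info("Starting to practice The Telemetry for %i second(s)", 10)

# Flush buffered log records before the process exits.
logger_provider.shutdown()
```

The resource attributes in the Collector output above, such as `service.name` and the `telemetry.sdk.*` entries, come from exactly this kind of setup.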
+ +If you want to dive deeper into OTel log collection (especially with Python), +check out the following resources: + +- [OpenTelemetry Logging][otel-logs-spec] (OTel docs) +- [Events API Interface][otel-logs-events] (OTel docs) +- [General Logs Attributes][otel-semconv-logs] (semantic conventions) +- [OpenTelemetry Python][otel-python-repo] (GitHub repository) +- [A language-specific implementation of OpenTelemetry in Python][otel-python] + (OTel docs) +- [OpenTelemetry Logging Instrumentation][py-docs-logs] (Python docs) +- [OpenTelemetry Logs SDK example][py-docs-logs-example] (Python docs) + +[repo-baby-grogu]: + https://github.com/mhausenblas/ref.otel.help/tree/main/how-to/logs-collection/baby-grogu/ +[repo-expert-grogu]: + https://github.com/mhausenblas/ref.otel.help/tree/main/how-to/logs-collection/expert-grogu/ +[repo-yoda]: + https://github.com/mhausenblas/ref.otel.help/tree/main/how-to/logs-collection/yoda/ +[filelog]: + https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver +[debug]: + https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter +[otelbin-expert-grogu]: + https://www.otelbin.io/?#config=receivers%3A*N__filelog%3A*N____include%3A_%5B_%2Fusr%2Fsrc%2Fapp%2F**.log_%5D*N____start*_at%3A_beginning*N____operators%3A*N____-_type%3A_json*_parser*N______timestamp%3A*N________parse*_from%3A_attributes.asctime*N________layout%3A_*%22*.Y-*.m-*.dT*.H%3A*.M%3A*.S*%22*N______severity%3A*N________parse*_from%3A_attributes.levelname*Nexporters%3A*N__logging%3A*N____verbosity%3A_detailed*Nservice%3A*N__pipelines%3A*N____logs%3A*N______receivers%3A_%5B_filelog_%5D*N______exporters%3A_%5B_logging_%5D%7E +[otlp]: /docs/specs/otlp/ +[otelbin-yoda]: + https://www.otelbin.io/?#config=receivers%3A*N__otlp%3A*N____protocols%3A*N______grpc%3A*Nexporters%3A*N__logging%3A*N____verbosity%3A_detailed*Nservice%3A*N__pipelines%3A*N____logs%3A*N______receivers%3A_%5B_otlp_%5D*N______exporters%3A_%5B_logging_%5D%7E +[dataprepper]: https://opensearch.org/docs/latest/data-prepper/index/ +[svrnm]: https://github.com/svrnm +[hossko]: https://github.com/hossko +[otel-logs-spec]: /docs/specs/otel/logs/ +[otel-logs-events]: /docs/specs/otel/logs/event-api/ +[otel-semconv-logs]: /docs/specs/semconv/general/logs/ +[otel-python-repo]: https://github.com/open-telemetry/opentelemetry-python +[otel-python]: /docs/instrumentation/python/ +[py-docs-logs]: + https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html +[py-docs-logs-example]: + https://opentelemetry-python.readthedocs.io/en/latest/examples/logs/README.html diff --git a/content/en/blog/2023/otterize-otel/index.md b/content/en/blog/2023/otterize-otel/index.md new file mode 100644 index 000000000000..af46b35260ea --- /dev/null +++ b/content/en/blog/2023/otterize-otel/index.md @@ -0,0 +1,78 @@ +--- +title: Adding OpenTelemetry support to the Otterize network mapper +linkTitle: OTel integration for Otterize network mapper +date: 2023-11-17 +author: >- + [Ori Shoshan](https://github.com/orishoshan) (Otterize), [David G. 
+  Simmons](https://github.com/davidgs) (Otterize)
+cSpell:ignore: brainer Otterize Shoshan
+---
+
+{{< blog/integration-badge >}}
+
+## A no-brainer integration: Adding OpenTelemetry support to the Otterize network mapper
+
+[Otterize](https://otterize.com/) automates workload Identity and Access
+Management (IAM) for zero-trust, automating policies like Kubernetes network
+policies, Istio policies, AWS IAM policies, and more, through a collection of
+[open source projects](https://github.com/otterize). To enable that automation,
+Otterize built the [network mapper](https://github.com/otterize/network-mapper).
+The Otterize network mapper is a standalone Kubernetes tool that builds a live
+network map of your infrastructure. It can visualize that map as an image file
+or as an interactive graph on Otterize Cloud, or export it as ClientIntents, a
+custom Kubernetes resource that describes each workload’s required access, or
+their “intentions”. It’s designed to be a zero-config deployment that
+immediately provides a network map, while being light on resource use and
+requiring the lowest privileges possible. At its core, it collects DNS traffic
+and analyzes it to figure out what communications look like.
+
+The awesome folks at
+[ServiceNow Cloud Observability](https://www.servicenow.com/products/observability.html)
+(formerly Lightstep)
+[recently contributed OpenTelemetry exporting support to the network mapper](https://github.com/otterize/network-mapper/pull/141).
+This means that you can now use the network mapper to generate a network map of
+your infrastructure and export OpenTelemetry data to a Grafana Tempo instance,
+so you can see a service graph without making any changes to the services
+deployed in your cluster.
+
+![A screenshot showing a Grafana Tempo service graph](otterize-otel.png)
+
+This integration was simple: the OpenTelemetry SDK is easy to use and fits like
+a glove into the existing network mapper code.
+
+### How does OpenTelemetry work with the network mapper?
+
+With the service graph, you'll be able to see what services are communicating
+with each other. You can then use that information for operational or security
+needs, such as determining the blast radius of a downtime or security incident.
+You can also use the service graph to figure out where to start rolling out
+OpenTelemetry tracing, as that deployment tends to be more involved and
+requires the integration of the OpenTelemetry SDK into your source code.
+
+While it was easy to use the OTel SDK for the network mapper, we can see why
+there's a bit of a chicken-and-egg problem here when you're looking into
+OpenTelemetry: to start seeing significant value with tracing, you really need
+to see a bunch of services together, and that can be a lot to do as your first
+step.
+
+Since the network mapper doesn’t require you to change any of your existing
+application code, the barrier to entry for getting started with OpenTelemetry
+is significantly reduced. This is a great way to get started with emitting
+OpenTelemetry metrics for each service in your infrastructure and to understand
+how your services interact with each other, before committing to a more
+significant OpenTelemetry rollout.
+
+### The strength of open source in action!
+
+Adding this feature had never crossed our minds, but its value is undeniable,
+particularly for those deploying the network mapper alongside Grafana Tempo, or
+for individuals exploring OpenTelemetry.
+ +It's great that the ServiceNow Cloud Observability Team stumbled upon the +network mapper and graciously contributed this enhancement, making the network +mapper seamlessly integrate with OpenTelemetry and Grafana. This is the beauty +of open source collaboration! + +If you're interested in exploring or contributing to our project, head on over +to +[https://github.com/otterize/network-mapper](https://github.com/otterize/network-mapper). diff --git a/content/en/blog/2023/otterize-otel/otterize-otel.png b/content/en/blog/2023/otterize-otel/otterize-otel.png new file mode 100644 index 000000000000..28a063c12add Binary files /dev/null and b/content/en/blog/2023/otterize-otel/otterize-otel.png differ diff --git a/content/en/blog/2023/perf-testing/index.md b/content/en/blog/2023/perf-testing/index.md new file mode 100644 index 000000000000..96339d688a50 --- /dev/null +++ b/content/en/blog/2023/perf-testing/index.md @@ -0,0 +1,83 @@ +--- +title: OTel component performance benchmarks +linkTitle: Performance benchmarks +date: 2023-11-27 +author: '[Martin Kuba](https://github.com/martinkuba) (Lightstep)' +cSpell:ignore: Kuba +--- + +As more and more users are looking to use OpenTelemetry instrumentation in their +production deployments, one important consideration is the impact that +OpenTelemetry will have on their application performance. In this blog post I +will discuss a few recent improvements in tooling around performance +benchmarking. + +### Measuring performance overhead + +Instrumentation is not free. It intercepts an application's operations and +collects (often) a large amount of data, which takes additional CPU and memory. +This can have a direct effect on throughput and response time, which can affect +the end-user experience with the application. It can also have an impact on +operational cost, such as increasing the number of instances a service runs on. + +Providing general guidance about performance overhead is inherently difficult. +There are many factors that affect performance: the application throughput, +hardware the application runs on, what exactly is instrumented, how the +OpenTelemetry SDK is configured, sampling, etc. Ultimately, the best way to +measure performance is in the context of the specific application by running a +load test. + +With that said a number of OpenTelemetry components include performance tests +that help catch regressions and can be used to provide some idea of their +performance characteristics. + +### OpenTelemetry Collector + +The [OpenTelemetry Collector](/docs/collector/) runs +[end-to-end load tests](https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/workflows/load-tests.yml) +on every merge to the main branch. There have been two recent updates to the CI +workflow: + +1. Tests run on community-owned bare metal machines, which has made test results + more consistent. +2. Test results are published automatically: for a subset of the load test + results, see [Collector Benchmarks](/docs/collector/benchmarks/). The + [complete test results](https://open-telemetry.github.io/opentelemetry-collector-contrib/benchmarks/loadtests/) + are available as well. 
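
Before moving on to the language SDKs, here is a rough illustration of what measuring overhead "in the context of the specific application" can look like. This is only a sketch, not one of the project's official benchmarks: it times span creation in Python with the SDK's default sampler and no exporter attached, so numbers for a real service with your exporter, processors, and workload will look different.

```python
import time

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

# A TracerProvider without a span processor: spans are created and sampled,
# but never exported, which isolates the cost of span creation itself.
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("overhead-sketch")

N = 10_000
start = time.perf_counter()
for _ in range(N):
    with tracer.start_as_current_span("practice-span"):
        pass
elapsed = time.perf_counter() - start
print(f"{N} spans in {elapsed:.3f} s ({elapsed / N * 1e6:.1f} µs per span)")
```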
+ +### Language SDKs + +A number of OpenTelemetry SDKs already include existing micro-benchmark tests, +for example: + +- [SpanBenchmark.java](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk/trace/src/jmh/java/io/opentelemetry/sdk/trace/SpanBenchmark.java) +- [test_benchmark_trace.py](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-sdk/tests/performance/benchmarks/trace/test_benchmark_trace.py) +- [benchmark_test.go](https://github.com/open-telemetry/opentelemetry-go/blob/main/sdk/trace/benchmark_test.go) +- [benchmark/span.js](https://github.com/open-telemetry/opentelemetry-js/blob/main/packages/opentelemetry-sdk-trace-base/test/performance/benchmark/span.js) + +These tests were run only on demand in the past. With the recent tooling +improvements, Java and JavaScript tests are now run automatically on every merge +to the main branch, and the results are published for anyone to easily access. +The tests are also run on community-owned bare metal machines, so that the +results are as consistent as possible. + +{{% figure + src="java-benchmark-results.png" + caption="Sample [benchmark results for Java](https://open-telemetry.github.io/opentelemetry-java/benchmarks/)" +%}} + +{{% figure + src="js-benchmark-results.png" + caption="Sample [benchmark results for JavaScript](https://open-telemetry.github.io/opentelemetry-js/benchmarks/)" +%}} + +There is work in progress to make the same updates for Python and Go. + +### Conclusion + +Performance optimization is often considered only as an afterthought, but it +does not have to be. We are making improvements to automated tooling and +documentation to provide project maintainers and the community with reliable +performance testing during development. Ultimately our focus as a community is +to give end users confidence when using our components, especially around the +impact of OpenTelemetry's instrumentation on their applications’ performance. diff --git a/content/en/blog/2023/perf-testing/java-benchmark-results.png b/content/en/blog/2023/perf-testing/java-benchmark-results.png new file mode 100644 index 000000000000..67dba5a6ccd5 Binary files /dev/null and b/content/en/blog/2023/perf-testing/java-benchmark-results.png differ diff --git a/content/en/blog/2023/perf-testing/js-benchmark-results.png b/content/en/blog/2023/perf-testing/js-benchmark-results.png new file mode 100644 index 000000000000..769a9938f9b8 Binary files /dev/null and b/content/en/blog/2023/perf-testing/js-benchmark-results.png differ diff --git a/content/en/community/end-user/discussion-group.md b/content/en/community/end-user/discussion-group.md index ca6dea8c1e1c..7b84fd5b4867 100644 --- a/content/en/community/end-user/discussion-group.md +++ b/content/en/community/end-user/discussion-group.md @@ -14,7 +14,7 @@ Feedback that is shared and collected in these sessions will be routed back to the relevant project maintainers to help drive prioritization of improvements and changes to the project. -**New for 2023!** +## New for 2023 - Sessions are now available for all regions! - You can now find summaries of past discussions every month! Search the blog @@ -26,7 +26,7 @@ and changes to the project. recorded**. This will help make the feedback more discoverable by the community. 
-**Upcoming sessions** +## Upcoming sessions Here are upcoming sessions, or you can view them on the [OpenTelemetry calendar](https://github.com/open-telemetry/community#calendar): diff --git a/content/en/docs/collector/_index.md b/content/en/docs/collector/_index.md index a3119a2e83fe..c66013671257 100644 --- a/content/en/docs/collector/_index.md +++ b/content/en/docs/collector/_index.md @@ -3,7 +3,7 @@ title: Collector description: Vendor-agnostic way to receive, process and export telemetry data. aliases: [collector/about] cascade: - vers: 0.89.0 + vers: 0.90.0 weight: 10 --- diff --git a/content/en/docs/collector/configuration.md b/content/en/docs/collector/configuration.md index a7cca798af5f..fa70a5c3b258 100644 --- a/content/en/docs/collector/configuration.md +++ b/content/en/docs/collector/configuration.md @@ -1,40 +1,45 @@ --- title: Configuration weight: 20 +description: Learn how to configure the Collector to suit your needs # prettier-ignore -cSpell:ignore: cfssl cfssljson fluentforward gencert genkey hostmetrics initca loglevel OIDC oidc otlphttp pprof prodevent prometheusremotewrite servicegraph spanevents spanmetrics upsert zpages +cSpell:ignore: cfssl cfssljson fluentforward gencert genkey hostmetrics initca loglevel OIDC oidc otlphttp pprof prodevent prometheusremotewrite servicegraph spanevents spanmetrics struct upsert zpages --- -Familiarity with the following pages is assumed: +You can configure the Collector to suit your observability needs. Before you +learn how Collector configuration works, familiarize yourself with the following +content: - [Data collection concepts][dcc] in order to understand the repositories applicable to the OpenTelemetry Collector. - [Security guidance](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md) -## Basics +## Configuration structure {#basics} -The Collector consists of four components that access telemetry data: +The structure of any Collector configuration file consists of four classes of +pipeline components that access telemetry data: - [Receivers](#receivers) - + - [Processors](#processors) - + - [Exporters](#exporters) - + - [Connectors](#connectors) - + -These components once configured must be enabled via pipelines within the -[service](#service) section. +After each pipeline component is configured you must enable it using the +pipelines within the [service](#service) section of the configuration file. -Secondarily, there are [extensions](#extensions), which provide capabilities -that can be added to the Collector, but which do not require direct access to -telemetry data and are not part of pipelines. They are also enabled within the -[service](#service) section. +Besides pipeline components you can also configure [extensions](#extensions), +which provide capabilities that can be added to the Collector, such as +diagnostic tools. Extensions don't require direct access to telemetry data and +are enabled through the [service](#service) section. -An example configuration would look like: +The following is an example of Collector configuration with a receiver, a +processor, an exporter, and three extensions: ```yaml receivers: @@ -72,10 +77,10 @@ service: exporters: [otlp] ``` -Note that receivers, processors, exporters and/or pipelines are defined via -component identifiers in `type[/name]` format (e.g. `otlp` or `otlp/2`). -Components of a given type can be defined more than once as long as the -identifiers are unique. 
For example: +Note that receivers, processors, exporters and pipelines are defined through +component identifiers following the `type[/name]` format, for example `otlp` or +`otlp/2`. You can define components of a given type more than once as long as +the identifiers are unique. For example: ```yaml receivers: @@ -125,7 +130,7 @@ service: ``` The configuration can also include other files, causing the Collector to merge -the two files in a single in-memory representation of the YAML configuration: +them in a single in-memory representation of the YAML configuration: ```yaml receivers: @@ -172,27 +177,22 @@ service: exporters: [otlp] ``` -## Receivers {#receivers} +## Receivers {#receivers} -A receiver, which can be push or pull based, is how data gets into the -Collector. Receivers may support one or more -[data sources](/docs/concepts/signals/). - -The `receivers:` section is how receivers are configured. Many receivers come -with default settings so simply specifying the name of the receiver is enough to -configure it (for example, `zipkin:`). If configuration is required or a user -wants to change the default configuration then such configuration must be -defined in this section. Configuration parameters specified for which the -receiver provides a default configuration are overridden. +Receivers collect telemetry from one or more sources. They can be pull or push +based, and may support one or more [data sources](/docs/concepts/signals/). -> Configuring a receiver does not enable it. Receivers are enabled via pipelines -> within the [service](#service) section. +Receivers are configured in the `receivers` section. Many receivers come with +default settings, so that specifying the name of the receiver is enough to +configure it. If you need to configure a receiver or want to change the default +configuration, you can do so in this section. Any setting you specify overrides +the default values, if present. -One or more receivers must be configured. By default, no receivers are -configured. A basic example of receivers is provided below. +> Configuring a receiver does not enable it. Receivers are enabled by adding +> them to the appropriate pipelines within the [service](#service) section. -> For detailed receiver configuration, see the -> [receiver README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md). +The Collector requires one or more receivers. The following example shows +various receivers in the same configuration file: ```yaml receivers: @@ -247,28 +247,33 @@ receivers: zipkin: ``` -## Processors {#processors} +> For detailed receiver configuration, see the +> [receiver README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md). -Processors are run on data between being received and being exported. Processors -are optional though -[some are recommended](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor#recommended-processors). +## Processors {#processors} -The `processors:` section is how processors are configured. Processors may come -with default settings, but many require configuration. Any configuration for a -processor must be done in this section. Configuration parameters specified for -which the processor provides a default configuration are overridden. +Processors take the data collected by receivers and modify or transform it +before sending it to the exporters. 
Data processing happens according to rules +or settings defined for each processor, which might include filtering, dropping, +renaming, or recalculating telemetry, among other operations. The order of the +processors in a pipeline determines the order of the processing operations that +the Collector applies to the signal. -> Configuring a processor does not enable it. Processors are enabled via -> pipelines within the [service](#service) section. +Processors are optional, although some +[are recommended](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor#recommended-processors). -A basic example of the default processors is provided below. The full list of -processors can be found by combining the list found -[here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor) -and -[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor). +You can configure processors using the `processors` section of the Collector +configuration file. Any setting you specify overrides the default values, if +present. -> For detailed processor configuration, see the -> [processor README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md). +> Configuring a processor does not enable it. Processors are enabled by adding +> them to the appropriate pipelines within the [service](#service) section. + +The following example shows several default processors in the same configuration +file. You can find the full list of processors by combining the list from +[opentelemetry-collector-contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor) +and the list from +[opentelemetry-collector](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor). ```yaml processors: @@ -328,28 +333,25 @@ processors: separator: '::' ``` -## Exporters {#exporters} +> For detailed processor configuration, see the +> [processor README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md). -An exporter, which can be push or pull based, is how you send data to one or -more backends/destinations. Exporters may support one or more -[data sources](/docs/concepts/signals/). +## Exporters {#exporters} -The `exporters:` section is how exporters are configured. Exporters may come -with default settings, but many require configuration to specify at least the -destination and security settings. Any configuration for an exporter must be -done in this section. Configuration parameters specified for which the exporter -provides a default configuration are overridden. +Exporters send data to one or more backends or destinations. Exporters can be +pull or push based, and may support one or more +[data sources](/docs/concepts/signals/). -> Configuring an exporter does not enable it. Exporters are enabled via -> pipelines within the [service](#service) section. +The `exporters` section contains exporters configuration. Most exporters require +configuration to specify at least the destination, as well as security settings, +like authentication tokens or TLS certificates. Any setting you specify +overrides the default values, if present. -One or more exporters must be configured. By default, no exporters are -configured. A basic example of exporters is provided below. Certain exporter -configurations require x.509 certificates to be created in order to be secure, -as described in [setting up certificates](#setting-up-certificates). 
+> Configuring an exporter does not enable it. Exporters are enabled by adding +> them to the appropriate pipelines within the [service](#service) section. -> For detailed exporter configuration, see the -> [exporter README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md). +The Collector requires one or more exporters. The following example shows +various exporters in the same configuration file: ```yaml exporters: @@ -359,7 +361,7 @@ exporters: # Data sources: traces otlp/jaeger: - endpoint: jaeger-all-in-one:4317 + endpoint: jaeger-server:4317 tls: cert_file: cert.pem key_file: cert-key.pem @@ -386,50 +388,61 @@ exporters: # Data sources: traces, metrics otlphttp: - endpoint: https://example.com:4318 + endpoint: https://otlp.example.com:4318 # Data sources: metrics prometheus: - endpoint: prometheus:8889 + endpoint: localhost:8889 namespace: default # Data sources: metrics prometheusremotewrite: - endpoint: http://some.url:9411/api/prom/push - # For official Prometheus (e.g. running via Docker) - # endpoint: 'http://prometheus:9090/api/v1/write' + endpoint: http://prometheus.example.com:9411/api/prom/push + # When using the official Prometheus (running via Docker) + # endpoint: 'http://prometheus:9090/api/v1/write', add: # tls: # insecure: true # Data sources: traces zipkin: - endpoint: http://localhost:9411/api/v2/spans + endpoint: http://zipkin.example.com:9411/api/v2/spans ``` -## Connectors {#connectors} +Notice that some exporters require x.509 certificates in order to establish +secure connections, as described in +[setting up certificates](#setting-up-certificates). -A connector is both an exporter and receiver. As the name suggests a Connector -connects two pipelines: It consumes data as an exporter at the end of one -pipeline and emits data as a receiver at the start of another pipeline. It may -consume and emit data of the same data type, or of different data types. A -connector may generate and emit data to summarize the consumed data, or it may -simply replicate or route data. +> For more information on exporter configuration, see the +> [exporter README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md). -The `connectors:` section is how connectors are configured. +## Connectors {#connectors} -> Configuring a connectors does not enable it. Connectors are enabled via -> pipelines within the [service](#service) section. +Connectors join two pipelines, acting as both exporter and receiver. A connector +consumes data as an exporter at the end of one pipeline and emits data as a +receiver at the beginning of another pipeline. The data consumed and emitted may +be of the same type or of different data types. You can use connectors to +summarize consumed data, replicate it, or route it. -One or more connectors may be configured. By default, no connectors are -configured. A basic example of connectors is provided below. +You can configure one or more connectors using the `connectors` section of the +Collector configuration file. By default, no connectors are configured. Each +type of connector is designed to work with one or more pairs of data types and +may only be used to connect pipelines accordingly. -> For detailed connector configuration, see the -> [connector README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md). +> Configuring a connector doesn't enable it. Connectors are enabled through +> pipelines within the [service](#service) section. 
+ +The following example shows the `count` connector and how it's configured in the +`pipelines` section. Notice that the connector acts as an exporter for traces +and as a receiver for metrics, connecting both pipelines: ```yaml -connectors: - forward: +receivers: + foo: +exporters: + bar: + +connectors: count: spanevents: my.prod.event.count: @@ -438,48 +451,36 @@ connectors: - 'attributes["env"] == "prod"' - 'name == "prodevent"' - spanmetrics: - histogram: - explicit: - buckets: [100us, 1ms, 2ms, 6ms, 10ms, 100ms, 250ms] - dimensions: - - name: http.method - default: GET - - name: http.status_code - dimensions_cache_size: 1000 - aggregation_temporality: 'AGGREGATION_TEMPORALITY_CUMULATIVE' - - servicegraph: - latency_histogram_buckets: [1, 2, 3, 4, 5] - dimensions: - - dimension-1 - - dimension-2 - store: - ttl: 1s - max_items: 10 +service: + pipelines: + traces: + receivers: [foo] + exporters: [count] + metrics: + receivers: [count] + exporters: [bar] ``` +> For detailed connector configuration, see the +> [connector README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md). + ## Extensions -Extensions are available primarily for tasks that do not involve processing -telemetry data. Examples of extensions include health monitoring, service -discovery, and data forwarding. Extensions are optional. +Extensions are optional components that expand the capabilities of the Collector +to accomplish tasks not directly involved with processing telemetry data. For +example, you can add extensions for Collector health monitoring, service +discovery, or data forwarding, among others. -The `extensions:` section is how extensions are configured. Many extensions come -with default settings so simply specifying the name of the extension is enough -to configure it (for example, `health_check:`). If configuration is required or -a user wants to change the default configuration then such configuration must be -defined in this section. Configuration parameters specified for which the -extension provides a default configuration are overridden. +You can configure extensions through the `extensions` section of the Collector +configuration file. Most extensions come with default settings, so you can +configure them just by specifying the name of the extension. Any setting you +specify overrides the default values, if present. -> Configuring an extension does not enable it. Extensions are enabled within the +> Configuring an extension doesn't enable it. Extensions are enabled within the > [service](#service) section. -By default, no extensions are configured. A basic example of extensions is -provided below. - -> For detailed extension configuration, see the -> [extension README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md). +By default, no extensions are configured. The following example shows several +extensions configured in the same file: ```yaml extensions: @@ -490,21 +491,26 @@ extensions: size_mib: 512 ``` -## Service +> For detailed extension configuration, see the +> [extension README](https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md). + +## Service section {#service} The service section is used to configure what components are enabled in the Collector based on the configuration found in the receivers, processors, exporters, and extensions sections. If a component is configured, but not -defined within the service section then it is not enabled. 
The service section -consists of three sub-sections: +defined within the service section, then it's not enabled. + +The service section consists of three subsections: -- extensions -- pipelines -- telemetry +- Extensions +- Pipelines +- Telemetry ### Extensions {#service-extensions} -Extensions consist of a list of all extensions to enable. For example: +The `extensions` subsection consists of a list of all extensions to enable. For +example: ```yaml service: @@ -513,54 +519,57 @@ service: ### Pipelines -Pipelines can be of the following types: +Pipelines are defined in the `pipelines` subsection and can be of the following +types: -- traces: collects and processes trace data. -- metrics: collects and processes metric data. -- logs: collects and processes log data. +- `traces` collect and processes trace data. +- `metrics` collect and processes metric data. +- `logs` collect and processes log data. -A pipeline consists of a set of receivers, processors and exporters. Each -receiver/processor/exporter must be defined in the configuration outside of the -service section to be included in a pipeline. +A pipeline consists of a set of receivers, processors and exporters. Before +including a receiver, processor, or exporter in a pipeline, make sure to define +its configuration in the appropriate section. -_Note:_ Each receiver/processor/exporter can be used in more than one pipeline. -For processor(s) referenced in multiple pipelines, each pipeline will get a -separate instance of that processor(s). This is in contrast to -receiver(s)/exporter(s) referenced in multiple pipelines, where only one -instance of a receiver/exporter is used for all pipelines. Also note that the -order of processors dictates the order in which data is processed. +You can use the same receiver, processor, or exporter in more than one pipeline. +When a processor is referenced in multiple pipelines, each pipeline gets a +separate instance of the processor. -The following is an example pipeline configuration: +The following is an example of pipeline configuration. Note that the order of +processors dictates the order in which data is processed: ```yaml service: pipelines: metrics: receivers: [opencensus, prometheus] + processors: [batch] exporters: [opencensus, prometheus] traces: receivers: [opencensus, jaeger] - processors: [batch] + processors: [batch, memory_limiter] exporters: [opencensus, zipkin] ``` ### Telemetry -Telemetry is where the telemetry for the collector itself can be configured. It -has two subsections: `logs` and `metrics`. +You can configure telemetry for the Collector itself in the `telemetry` +subsection inside the `service` section. Collector telemetry can be useful when +troubleshooting Collector issues. -The `logs` subsection allows configuration of the logs generated by the -collector. By default the collector will write its logs to stderr with a log -level of `INFO`. You can also add static key-value pairs to all logs using the -`initial_fields` section. -[View the full list of `logs` options here.](https://github.com/open-telemetry/opentelemetry-collector/blob/7666eb04c30e5cfd750db9969fe507562598f0ae/config/service.go#L41-L97) +The `logs` subsection lets you configure logs generated by the Collector. By +default, the Collector writes its logs to stderr with a log level of `INFO`. You +can also add static key-value pairs to all logs using the `initial_fields` +section. 
See the full list of `logs` options +[here.](https://github.com/open-telemetry/opentelemetry-collector/blob/7666eb04c30e5cfd750db9969fe507562598f0ae/config/service.go#L41-L97) -The `metrics` subsection allows configuration of the metrics generated by the -collector. By default the collector will generate basic metrics about itself and -expose them for scraping at `localhost:8888/metrics` -[View the full list of `metrics` options here.](https://github.com/open-telemetry/opentelemetry-collector/blob/7666eb04c30e5cfd750db9969fe507562598f0ae/config/service.go#L99-L111) +The `metrics` subsection lets you configure metrics generated by the Collector. +By default, the Collector generates basic metrics about itself and expose them +for scraping at . For the full list of options +for the `metrics` subsection, see the +[`ServiceTelemetryMetrics struct`](https://github.com/open-telemetry/opentelemetry-collector/blob/7666eb04c30e5cfd750db9969fe507562598f0ae/config/service.go#L99-L111) +comments. -The following is an example telemetry configuration: +The following example shows Collector telemetry configuration: ```yaml service: @@ -574,9 +583,9 @@ service: address: 0.0.0.0:8888 ``` -## Other Information +## Additional information -### Configuration Environment Variables +### Environment variables The use and expansion of environment variables is supported in the Collector configuration. For example to use the values stored on the `DB_KEY` and @@ -600,48 +609,47 @@ exporters: namespace: $$DataVisualization ``` -### Proxy Support +### Proxy support -Exporters that leverage the `net/http` package (all do today) respect the -following proxy environment variables: +Exporters that use the [`net/http`](https://pkg.go.dev/net/http) package respect +the following proxy environment variables: -- HTTP_PROXY -- HTTPS_PROXY -- NO_PROXY +- `HTTP_PROXY`: Address of the HTTP proxy +- `HTTPS_PROXY`: Address of the HTTPS proxy +- `NO_PROXY`: Addresses that must not use the proxy -If set at Collector start time then exporters, regardless of protocol, will or -will not proxy traffic as defined by these environment variables. +If set at Collector start time, exporters, regardless of the protocol, proxy +traffic or bypass proxy traffic as defined by these environment variables. ### Authentication -Most receivers exposing an HTTP or gRPC port are able to be protected using the -collector's authentication mechanism, and most exporters using HTTP or gRPC -clients are able to add authentication data to the outgoing requests. - -The authentication mechanism in the collector uses the extensions mechanism, -allowing for custom authenticators to be plugged into collector distributions. -If you are interested in developing a custom authenticator, check out the -["Building a custom authenticator"](../custom-auth) document. - -Each authentication extension has two possible usages: as client authenticator -for exporters, adding auth data to outgoing requests, and as server -authenticator for receivers, authenticating incoming connections. Refer to the -authentication extension for a list of its capabilities, but in general, an -authentication extension would only implement one of those traits. For a list of -known authenticators, use the -[Registry](/ecosystem/registry/?s=authenticator&component=extension) available -in this website. - -To add a server authenticator to a receiver in your collector, make sure to: - -1. add the authenticator extension and its configuration under `.extensions` -1. 
add a reference to the authenticator to `.services.extensions`, so that it's - loaded by the collector -1. add a reference to the authenticator under - `.receivers...auth` - -Here's an example that uses the OIDC authenticator on the receiver side, making -this suitable for a remote collector that receives data from an OpenTelemetry +Most receivers exposing an HTTP or gRPC port can be protected using the +Collector's authentication mechanism. Similarly, most exporters using HTTP or +gRPC clients can add authentication to outgoing requests. + +The authentication mechanism in the Collector uses the extensions mechanism, +allowing for custom authenticators to be plugged into Collector distributions. +Each authentication extension has two possible usages: + +- As client authenticator for exporters, adding auth data to outgoing requests +- As server authenticator for receivers, authenticating incoming connections. + +For a list of known authenticators, see the +[Registry](/ecosystem/registry/?s=authenticator&component=extension). If you're +interested in developing a custom authenticator, see +[Building a custom authenticator](../custom-auth). + +To add a server authenticator to a receiver in the Collector, follow these +steps: + +1. Add the authenticator extension and its configuration under `.extensions`. +1. Add a reference to the authenticator to `.services.extensions`, so that it's + loaded by the Collector. +1. Add a reference to the authenticator under + `.receivers...auth`. + +The following example uses the OIDC authenticator on the receiver side, making +this suitable for a remote Collector that receives data from an OpenTelemetry Collector acting as agent: ```yaml @@ -676,7 +684,7 @@ service: ``` On the agent side, this is an example that makes the OTLP exporter obtain OIDC -tokens, adding them to every RPC made to a remote collector: +tokens, adding them to every RPC made to a remote Collector: ```yaml extensions: @@ -711,15 +719,14 @@ service: - otlp/auth ``` -### Setting up certificates +### Configuring certificates {#setting-up-certificates} -For a production setup, we strongly recommend using TLS certificates, either for -secure communication or mTLS for mutual authentication. See the below steps to -generate self-signed certificates used in this example. You might want to use -your current cert provisioning procedures to procure a certificate for -production usage. +In a production environment, use TLS certificates for secure communication or +mTLS for mutual authentication. Follow these steps to generate self-signed +certificates as in this example. You might want to use your current cert +provisioning procedures to procure a certificate for production usage. -Install [cfssl](https://github.com/cloudflare/cfssl), and create the following +Install [`cfssl`](https://github.com/cloudflare/cfssl) and create the following `csr.json` file: ```json @@ -737,16 +744,18 @@ Install [cfssl](https://github.com/cloudflare/cfssl), and create the following } ``` -Now, run the following commands: +Then run the following commands: ```bash cfssl genkey -initca csr.json | cfssljson -bare ca cfssl gencert -ca ca.pem -ca-key ca-key.pem csr.json | cfssljson -bare cert ``` -This will create two certificates; first, an "OpenTelemetry Example" Certificate -Authority (CA) in `ca.pem` and the associated key in `ca-key.pem`, and second a -client certificate in `cert.pem` (signed by the OpenTelemetry Example CA) and -the associated key in `cert-key.pem`. 
+This creates two certificates: + +- An "OpenTelemetry Example" Certificate Authority (CA) in `ca.pem`, with the + associated key in `ca-key.pem` +- A client certificate in `cert.pem`, signed by the OpenTelemetry Example CA, + with the associated key in `cert-key.pem`. [dcc]: /docs/concepts/components/#collector diff --git a/content/en/docs/collector/getting-started.md b/content/en/docs/collector/getting-started.md index a9fdd35098af..b809bba59736 100644 --- a/content/en/docs/collector/getting-started.md +++ b/content/en/docs/collector/getting-started.md @@ -50,7 +50,7 @@ To follow this tutorial you need the following telemetrygen traces --otlp-insecure --duration 5s ``` - After five seconds, `telemetrygen` stops and shows the sended messages in the + After five seconds, `telemetrygen` stops and shows the sent messages in the console: ```text diff --git a/content/en/docs/concepts/signals/baggage.md b/content/en/docs/concepts/signals/baggage.md index 5d3ec9734ce8..c51c416c1090 100644 --- a/content/en/docs/concepts/signals/baggage.md +++ b/content/en/docs/concepts/signals/baggage.md @@ -32,22 +32,27 @@ retrieve information. ## What should OTel Baggage be used for? -OTel Baggage should be used for data that you're okay with potentially exposing -to anyone who inspects your network traffic. This is because it's stored in HTTP -headers alongside the current context. If your relevant network traffic is -entirely within your own network, then this caveat may not apply. - Common use cases include information that’s only accessible further up a stack. This can include things like Account Identification, User IDs, Product IDs, and origin IPs, for example. Passing these down your stack allows you to then add them to your Spans in downstream services to make it easier to filter when you’re searching in your Observability back-end. -There are no built-in integrity checks to ensure that the Baggage items are -yours, so exercise caution when retrieving them. - ![OTel Baggage](/img/otel-baggage-2.svg) +## Baggage security considerations + +Sensitive Baggage items could be shared with unintended resources, like +third-party APIs. This is because automatic instrumentation includes Baggage in +most of your service’s network requests. Specifically, Baggage and other parts +of trace context are sent in HTTP headers, making it visible to anyone +inspecting your network traffic. If traffic is restricted within your network, +then this risk may not apply, but keep in mind that downstream services could +propagate Baggage outside your network. + +Also, there are no built-in integrity checks to ensure that Baggage items are +yours, so exercise caution when retrieving them. 
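+
+To make the use cases described earlier concrete, the following sketch, written
+in Java purely for illustration (the `user.id` key and its value are made up,
+and any OpenTelemetry language API offers an equivalent), attaches a Baggage
+entry high up the stack and reads it back further down to enrich a span:
+
+```java
+import io.opentelemetry.api.baggage.Baggage;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.context.Scope;
+
+// Upstream: attach the entry so it travels with the context.
+Baggage baggage = Baggage.current().toBuilder().put("user.id", "12345").build();
+try (Scope ignored = baggage.makeCurrent()) {
+  // Downstream, within the propagated context: read the entry back and copy it
+  // onto the current span as an attribute.
+  String userId = Baggage.current().getEntryValue("user.id");
+  if (userId != null) {
+    Span.current().setAttribute("user.id", userId);
+  }
+}
+```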
+ ## Baggage is not the same as Span attributes One important thing to note about Baggage is that it is not a subset of the diff --git a/content/en/docs/demo/logging-features.md b/content/en/docs/demo/logging-features.md index 22eb069467cc..acc5ca40f88a 100644 --- a/content/en/docs/demo/logging-features.md +++ b/content/en/docs/demo/logging-features.md @@ -13,7 +13,7 @@ aliases: [log_service_features] | Currency | C++ | 🚧 | | Email | Ruby | 🚧 | | Feature Flag | Erlang / Elixir | 🚧 | -| Fraud Detection | Kotlin | 🚧 | +| Fraud Detection | Kotlin | ✅ | | Frontend | TypeScript | 🚧 | | Payment | JavaScript | 🚧 | | Product Catalog | Go | 🚧 | diff --git a/content/en/docs/instrumentation/cpp/manual.md b/content/en/docs/instrumentation/cpp/manual.md index 2217e09a7218..930cbd6fdb41 100644 --- a/content/en/docs/instrumentation/cpp/manual.md +++ b/content/en/docs/instrumentation/cpp/manual.md @@ -227,7 +227,9 @@ p->AddView(std::move(observable_instrument_selector), std::move(observable_meter ## Logs -The logs API & SDK are currently under development. +The documentation for the logs API & SDK is missing, you can help make it +available by +[editing this page](https://github.com/open-telemetry/opentelemetry.io/edit/main/content/en/docs/instrumentation/cpp/manual.md). ## Next Steps diff --git a/content/en/docs/instrumentation/erlang/exporters.md b/content/en/docs/instrumentation/erlang/exporters.md index 8f12600bedb4..f17a549184a4 100644 --- a/content/en/docs/instrumentation/erlang/exporters.md +++ b/content/en/docs/instrumentation/erlang/exporters.md @@ -27,7 +27,7 @@ Zipkin also run by [docker-compose](https://docs.docker.com/compose/). To export to the running Collector the `opentelemetry_exporter` package must be added to the project's dependencies: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang {deps, [{opentelemetry_api, "~> {{% param versions.otelApi %}}"}, @@ -56,7 +56,7 @@ attempts to initialize and use the exporter. Example of Release configuration in `rebar.config` and for [mix's Release task](https://hexdocs.pm/mix/Mix.Tasks.Release.html): -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% rebar.config @@ -94,7 +94,7 @@ the HTTP protocol with endpoint of `localhost` on port `4318`. If using `grpc` for the `otlp_protocol` the endpoint should be changed to `http://localhost:4317`. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% config/sys.config.src diff --git a/content/en/docs/instrumentation/erlang/getting-started.md b/content/en/docs/instrumentation/erlang/getting-started.md index 906850559f55..969ba1d0983a 100644 --- a/content/en/docs/instrumentation/erlang/getting-started.md +++ b/content/en/docs/instrumentation/erlang/getting-started.md @@ -304,7 +304,7 @@ more telemetry backends. To get started with this guide, create a new project with `rebar3` or `mix`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang rebar3 new release otel_getting_started @@ -322,7 +322,7 @@ Then, in the project you just created, add both `opentelemetry_api` and `opentelemetry` as dependencies. We add both because this is a project we will run as a Release and export spans from. 
-{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang {deps, [{opentelemetry_api, "~> {{% param versions.otelApi %}}"}, @@ -346,7 +346,7 @@ In the case of Erlang, the API Application will also need to be added to `src/otel_getting_started.app.src` and a `relx` section to `rebar.config`. In an Elixir project, a `releases` section needs to be added to `mix.exs`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% src/otel_getting_started.app.src @@ -413,7 +413,7 @@ To configure OpenTelemetry to use a particular exporter, in this case the `exporter` for the span processor `otel_batch_processor`, a type of span processor that batches up multiple spans over a period of time: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% config/sys.config.src @@ -440,7 +440,7 @@ config :opentelemetry, Now that the dependencies and configuration are set up, we can create a module with a function `hello/0` that starts some spans: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% apps/otel_getting_started/src/otel_getting_started.erl diff --git a/content/en/docs/instrumentation/erlang/manual.md b/content/en/docs/instrumentation/erlang/manual.md index 318bc3865be0..56552699c506 100644 --- a/content/en/docs/instrumentation/erlang/manual.md +++ b/content/en/docs/instrumentation/erlang/manual.md @@ -53,7 +53,7 @@ interactive shell, a `Tracer` with a blank name and version is used. The created `Tracer`'s record can be looked up by the name of a module in the OTP Application: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang opentelemetry:get_application_tracer(?MODULE) @@ -75,7 +75,7 @@ This is how the Erlang and Elixir macros for starting and updating `Spans` get a Now that you have [Tracer](/docs/concepts/signals/traces/#tracer)s initialized, you can create [Spans](/docs/concepts/signals/traces/#spans). -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang ?with_span(main, #{}, fun() -> @@ -104,7 +104,7 @@ common kind of Span to create. ### Create Nested Spans -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang parent_function() -> @@ -160,7 +160,7 @@ attaching the context and setting the new span as currently active in the process. The whole context should be attached in order to not lose other telemetry data like [baggage](/docs/specs/otel/baggage/api/). -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang SpanCtx = ?start_span(child), @@ -204,7 +204,7 @@ Span Links that causally link it to another Span. A [Link](/docs/concepts/signals/traces/#span-links) needs a Span context to be created. 
-{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang Parent = ?current_span_ctx, @@ -243,7 +243,7 @@ The following example shows the two ways of setting attributes on a span by both setting an attribute in the start options and then again with `set_attributes` in the body of the span operation: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang ?with_span(my_span, #{attributes => [{'start-opts-attr', <<"start-opts-value">>}]}, @@ -276,7 +276,7 @@ from the specification and provided in For example, an instrumentation for an HTTP client or server would need to include semantic attributes like the scheme of the URL: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang -include_lib("opentelemetry_semantic_conventions/include/trace.hrl"). @@ -306,7 +306,7 @@ message on an [Span](/docs/concepts/signals/traces/#spans) that represents a discrete event with no duration that can be tracked by a single timestamp. You can think of it like a primitive log. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang ?add_event(<<"Gonna try it">>), @@ -330,7 +330,7 @@ Tracer.add_event("Did it!") Events can also have attributes of their own: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang ?add_event(<<"Process exited with reason">>, [{pid, Pid)}, {reason, Reason}])) @@ -354,7 +354,7 @@ could override the Error status with `StatusCode.OK`, but don’t set The status can be set at any time before the span is finished: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang -include_lib("opentelemetry_api/include/opentelemetry.hrl"). diff --git a/content/en/docs/instrumentation/erlang/propagation.md b/content/en/docs/instrumentation/erlang/propagation.md index 11e2c3289bb8..edf4fb7c49f4 100644 --- a/content/en/docs/instrumentation/erlang/propagation.md +++ b/content/en/docs/instrumentation/erlang/propagation.md @@ -26,7 +26,7 @@ propagators. By default the global propagators used are the W3C These global propagators can be configured by the Application environment variable `text_map_propagators`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% sys.config @@ -55,7 +55,7 @@ and `b3multi`. 
To manually inject or extract context the `otel_propagator_text_map` module can be used: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% uses the context from the process dictionary to add to an empty list of headers diff --git a/content/en/docs/instrumentation/erlang/resources.md b/content/en/docs/instrumentation/erlang/resources.md index 9049ce834a2c..1c4063923bc4 100644 --- a/content/en/docs/instrumentation/erlang/resources.md +++ b/content/en/docs/instrumentation/erlang/resources.md @@ -20,7 +20,7 @@ detectors use the OS environment variable `OTEL_RESOURCE_ATTRIBUTES` and the The detectors to use is a list of module names and can be configured in the Application configuration: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% sys.config @@ -61,7 +61,7 @@ OTEL_RESOURCE_ATTRIBUTES="deployment.environment=development" Alternatively, use the `resource` Application environment under the `opentelemetry` Application configuration of `sys.config` or `runtime.exs`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% sys.config diff --git a/content/en/docs/instrumentation/erlang/sampling.md b/content/en/docs/instrumentation/erlang/sampling.md index b520374142fd..f7e6afe6ffd0 100644 --- a/content/en/docs/instrumentation/erlang/sampling.md +++ b/content/en/docs/instrumentation/erlang/sampling.md @@ -58,7 +58,7 @@ This tells the SDK to sample spans such that only 10% of Traces get created. Example in the Application configuration with a root sampler for sampling 10% of Traces and using the parent decision in the other cases: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% config/sys.config.src @@ -107,7 +107,7 @@ export OTEL_TRACES_SAMPLER="parentbased_always_off" Here's an example in the Application configuration with a root sampler that always samples and using the parent decision in the other cases: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% config/sys.config.src @@ -137,7 +137,7 @@ Custom samplers can be created by implementing the [`otel_sampler` behaviour](https://hexdocs.pm/opentelemetry/1.3.0/otel_sampler.html#callbacks). This example sampler: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang -module(attribute_sampler). @@ -202,7 +202,7 @@ passed as the sampler's configuration. Example configuration to not sample any Span with an attribute specifying the URL requested is `/healthcheck`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang {opentelemetry, {sampler, {attributes_sampler, #{'http.target' => <<"/healthcheck">>}}}} diff --git a/content/en/docs/instrumentation/erlang/testing.md b/content/en/docs/instrumentation/erlang/testing.md index 3f4e2b49efc1..772a474fef9e 100644 --- a/content/en/docs/instrumentation/erlang/testing.md +++ b/content/en/docs/instrumentation/erlang/testing.md @@ -15,7 +15,7 @@ validation. 
Only the `opentelemetry` and `opentelemetry_api` libraries are required for testing in Elixir/Erlang: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang {deps, [{opentelemetry_api, "~> {{% param versions.otelApi %}}"}, @@ -39,7 +39,7 @@ Set your `exporter` to `:none` and the span processor to `:otel_simple_processor`. This ensure that your tests don't actually export data to a destination, and that spans can be analyzed after they are processed. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% config/sys.config.src @@ -69,7 +69,7 @@ A modified version of the `hello` function from the [Getting Started](/docs/instrumentation/erlang/getting-started/) guide will serve as our test case: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang %% apps/otel_getting_started/src/otel_getting_started.erl @@ -108,7 +108,7 @@ end ## Testing -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Erlang %}} +{{< tabpane text=true >}} {{% tab Erlang %}} ```erlang -module(otel_getting_started_SUITE). diff --git a/content/en/docs/instrumentation/java/_index.md b/content/en/docs/instrumentation/java/_index.md index e48b3a86f369..ee92b9e2a9f3 100644 --- a/content/en/docs/instrumentation/java/_index.md +++ b/content/en/docs/instrumentation/java/_index.md @@ -6,9 +6,9 @@ description: >- aliases: [/java, /java/metrics, /java/tracing] cascade: vers: - instrumentation: 1.31.0 + instrumentation: 1.32.0 otel: 1.32.0 - semconv: 1.22.0 + semconv: 1.23.1 weight: 18 --- diff --git a/content/en/docs/instrumentation/java/automatic/spring-boot.md b/content/en/docs/instrumentation/java/automatic/spring-boot.md index 9f42c102cd46..f0ebbeedd90e 100644 --- a/content/en/docs/instrumentation/java/automatic/spring-boot.md +++ b/content/en/docs/instrumentation/java/automatic/spring-boot.md @@ -3,7 +3,7 @@ title: Spring Boot linkTitle: Spring Boot weight: 30 description: Spring instrumentation for OpenTelemetry Java -cSpell:ignore: autoconfigure springboot +cSpell:ignore: autoconfigure datasource logback springboot --- You can use the [OpenTelemetry Java agent](..) with byte code instrumentation to @@ -17,6 +17,10 @@ instrument your application. The OpenTelemetry starter is compatible with Spring Boot 2.0 and 3.0, and Spring native. +For an example Spring Boot Native image application that uses the OpenTelemetry +Spring Boot starter, see +[opentelemetry-java-examples/spring-native](https://github.com/open-telemetry/opentelemetry-java-examples/tree/main/spring-native). + ## Configuration Add the dependency given below to enable the OpenTelemetry starter. @@ -37,7 +41,7 @@ auto-configuration, see the configuration [README]. io.opentelemetry.instrumentation opentelemetry-spring-boot-starter - {{% param vers.instrumentation %}} + {{% param vers.instrumentation %}}-alpha ``` @@ -46,7 +50,7 @@ auto-configuration, see the configuration [README]. 
```groovy dependencies { - implementation('io.opentelemetry.instrumentation:opentelemetry-spring-boot-starter:{{% param vers.instrumentation %}}') + implementation('io.opentelemetry.instrumentation:opentelemetry-spring-boot-starter:{{% param vers.instrumentation %}}-alpha') } ``` @@ -54,5 +58,115 @@ dependencies { ## Additional instrumentations -You can configure additional instrumentations with +### JDBC Instrumentation + +You have two ways to enable the JDBC instrumentation with the OpenTelemetry +starter. + +If your application does not declare `DataSource` bean, you can update your +`application.properties` file to have the data source URL starting with +`jdbc:otel:` and set the driver class to +`io.opentelemetry.instrumentation.jdbc.OpenTelemetryDriver`. + +```properties +spring.datasource.url=jdbc:otel:h2:mem:db +spring.datasource.driver-class-name=io.opentelemetry.instrumentation.jdbc.OpenTelemetryDriver +``` + +You can also wrap the `DataSource` bean in an +`io.opentelemetry.instrumentation.jdbc.datasource.OpenTelemetryDataSource`: + +```java +import io.opentelemetry.instrumentation.jdbc.datasource.JdbcTelemetry; + +@Configuration +public class DataSourceConfig { + + @Bean + public DataSource dataSource(OpenTelemetry openTelemetry) { + DataSourceBuilder dataSourceBuilder = DataSourceBuilder.create(); + //Data source configurations + DataSource dataSource = dataSourceBuilder.build(); + return JdbcTelemetry.create(openTelemetry).wrap(dataSource); + } + +} +``` + +With the datasource configuration, you need to add the following dependency: + +{{< tabpane text=true >}} {{% tab header="Maven (`pom.xml`)" lang=Maven %}} + +```xml + + + io.opentelemetry.instrumentation + opentelemetry-jdbc + {{% param vers.instrumentation %}}-alpha + + +``` + +{{% /tab %}} {{% tab header="Gradle (`gradle.build`)" lang=Gradle %}} + +```groovy +dependencies { + implementation('io.opentelemetry.instrumentation:opentelemetry-jdbc:{{% param vers.instrumentation %}}-alpha') +} +``` + +{{% /tab %}} {{< /tabpane>}} + +### Logging Instrumentation + +To enable the logging instrumentation for Logback you have to add the +OpenTelemetry appender in your `logback.xml` or `logback-spring.xml` file: + +```xml + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + +``` + +For Log4j 2, you have to add the OpenTelemetry appender to your `log4j2.xml` +file: + +```xml + + + + + + + + + + + +``` + +You can find more configuration options for the OpenTelemetry appender in the +documentation of the +[Logback](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/logback/logback-appender-1.0/library/README.md) +and +[Log4j](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/log4j/log4j-appender-2.17/library/README.md) +instrumentation libraries. + +### Other Instrumentation + +You can configure other instrumentations with [OpenTelemetry instrumentations libraries](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/docs/supported-libraries.md#libraries--frameworks). 
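+
+For instance, the following sketch wires the OkHttp instrumentation library
+using the `OpenTelemetry` bean provided by the starter. It assumes the
+`io.opentelemetry.instrumentation:opentelemetry-okhttp-3.0` library artifact is
+on the classpath and uses its `OkHttpTelemetry` entry point:
+
+```java
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.instrumentation.okhttp.v3_0.OkHttpTelemetry;
+import okhttp3.Call;
+import okhttp3.OkHttpClient;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class OkHttpClientConfig {
+
+  // Wrap a plain OkHttpClient so that outgoing HTTP calls are traced.
+  @Bean
+  public Call.Factory tracedHttpClient(OpenTelemetry openTelemetry) {
+    return OkHttpTelemetry.builder(openTelemetry).build()
+        .newCallFactory(new OkHttpClient());
+  }
+}
+```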
diff --git a/content/en/docs/instrumentation/java/exporters.md b/content/en/docs/instrumentation/java/exporters.md index e3e5da9baf29..30fc055302c0 100644 --- a/content/en/docs/instrumentation/java/exporters.md +++ b/content/en/docs/instrumentation/java/exporters.md @@ -17,13 +17,61 @@ how to setup exporters following the ## OTLP -To send trace data to a OTLP endpoint (like the [collector](/docs/collector) or -Jaeger) you'll want to use `opentelemetry-exporter-otlp`. +### Collector Setup -### OTLP Artifacts +{{% alert title="Note" color="info" %}} + +If you have a OTLP collector or backend already set up, you can skip this +section and [setup the OTLP exporter dependencies](#otlp-dependencies) for your +application. -There are multiple OTLP options available, each catering to different use cases. -For most users, the default artifact will suffice and be the most simple: +{{% /alert %}} + +To try out and verify your OTLP exporters, you can run the collector in a docker +container that writes telemetry directly to the console. + +In an empty directory, create a file called `collector-config.yaml` with the +following content: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: +exporters: + debug: + verbosity: detailed +service: + pipelines: + traces: + receivers: [otlp] + exporters: [debug] + metrics: + receivers: [otlp] + exporters: [debug] + logs: + receivers: [otlp] + exporters: [debug] +``` + +Now run the collector in a docker container: + +```shell +docker run -p 4317:4317 -p 4318:4318 --rm -v $(pwd)/collector-config.yaml:/etc/otelcol/config.yaml otel/opentelemetry-collector +``` + +This collector is now able to accept telemetry via OTLP. Later you may want to +[configure the collector](/docs/collector/configuration) to send your telemetry +to your observability backend. + +### Dependencies {#otlp-dependencies} + +If you want to send telemetry data to an OTLP endpoint (like the +[OpenTelemetry Collector](#collector-setup), [Jaeger](#jaeger) or +[Prometheus](#prometheus)), there are multiple OTLP options available, each +catering to different use cases. For most users, the default artifact will +suffice and be the most simple: {{< tabpane text=true >}} {{% tab Gradle %}} @@ -171,14 +219,251 @@ public class DiceApplication { } ``` -To see the traces exported quickly, you can run Jaeger with OTLP enabled in a -docker container: +## Console + +To debug your instrumentation or see the values locally in development, you can +use exporters writing telemetry data to the console (stdout). + +If you followed the +[Getting Started](/docs/instrumentation/java/getting-started/) or +[Manual Instrumentation](/docs/instrumentation/java/manual/) guides, you already +have the console exporter installed. + +The `LoggingSpanExporter`, the `LoggingMetricExporter` and the +`SystemOutLogRecordExporter` are included in the +`opentelemetry-exporter-logging` artifact. + +If you use +[SDK auto-configuration](/docs/instrumentation/java/manual/#automatic-configuration) +all you need to do is update your environment variables: + +```shell +env OTEL_TRACES_EXPORTER=logging OTEL_METRICS_EXPORTER=logging OTEL_LOGS_EXPORTER=logging java -jar ./build/libs/java-simple.jar +``` + +## Jaeger + +[Jaeger](https://www.jaegertracing.io/) natively supports OTLP to receive trace +data. 
You can run Jaeger in a docker container with the UI accessible on port
+16686 and OTLP enabled on ports 4317 and 4318:
 
 ```shell
-docker run -d --name jaeger \
-  -e COLLECTOR_OTLP_ENABLED=true \
+docker run --rm \
+  -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \
   -p 16686:16686 \
   -p 4317:4317 \
   -p 4318:4318 \
+  -p 9411:9411 \
   jaegertracing/all-in-one:latest
 ```
+
+Now follow the instructions to set up the [OTLP exporters](#otlp-dependencies).
+
+## Prometheus
+
+To send your metric data to [Prometheus](https://prometheus.io/), you can either
+[enable Prometheus' OTLP Receiver](https://prometheus.io/docs/prometheus/latest/feature_flags/#otlp-receiver)
+and use the [OTLP exporter](#otlp) or you can use the
+[`PrometheusHttpServer`](https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-prometheus/latest/io/opentelemetry/exporter/prometheus/PrometheusHttpServer.html),
+a `MetricReader` that starts an HTTP server which collects metrics and
+serializes them to Prometheus text format on request.
+
+### Backend Setup {#prometheus-setup}
+
+{{% alert title="Note" color="info" %}}
+
+If you have Prometheus or a Prometheus-compatible backend already set up, you
+can skip this section and set up the [Prometheus](#prometheus-dependencies) or
+[OTLP](#otlp-dependencies) exporter dependencies for your application.
+
+{{% /alert %}}
+
+You can run [Prometheus](https://prometheus.io) in a docker container,
+accessible on port `9090`, by following these instructions:
+
+Create a file called `prometheus.yml` with the following content:
+
+```yaml
+scrape_configs:
+  - job_name: dice-service
+    scrape_interval: 5s
+    static_configs:
+      - targets: [host.docker.internal:9464]
+```
+
+Run Prometheus in a docker container with the UI accessible on port `9090`:
+
+```shell
+docker run --rm -v ${PWD}/prometheus.yml:/prometheus/prometheus.yml -p 9090:9090 prom/prometheus --enable-feature=otlp-write-receiver
+```
+
+{{% alert title="Note" color="info" %}}
+
+When using Prometheus' OTLP Receiver, make sure that you set the OTLP endpoint
+for metrics in your application to `http://localhost:9090/api/v1/otlp`.
+
+Not all docker environments support `host.docker.internal`. In some cases you
+may need to replace `host.docker.internal` with `localhost` or the IP address of
+your machine.
+
+{{% /alert %}}
+
+### Dependencies {#prometheus-dependencies}
+
+Install the
+[`opentelemetry-exporter-prometheus`](https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-prometheus/latest)
+artifact as a dependency for your application:
+
+{{< tabpane text=true >}} {{% tab Gradle %}}
+
+```kotlin
+dependencies {
+  implementation 'io.opentelemetry:opentelemetry-exporter-prometheus:{{% param vers.otel %}}-alpha'
+}
+```
+
+{{% /tab %}} {{% tab Maven %}}
+
+```xml
+<project>
+  <dependencies>
+    <dependency>
+      <groupId>io.opentelemetry</groupId>
+      <artifactId>opentelemetry-exporter-prometheus</artifactId>
+    </dependency>
+  </dependencies>
+</project>
+```
+
+{{< /tab >}} {{< /tabpane>}}
+
+Update your OpenTelemetry configuration to use the exporter and to send data to
+your Prometheus backend:
+
+```java
+import io.opentelemetry.exporter.prometheus.PrometheusHttpServer;
+
+int prometheusPort = 9464;
+SdkMeterProvider sdkMeterProvider = SdkMeterProvider.builder()
+  .registerMetricReader(PrometheusHttpServer.builder().setPort(prometheusPort).build())
+  .setResource(resource)
+  .build();
+```
+
+With the above you can access your metrics at <http://localhost:9464/metrics>.
+Prometheus or an OpenTelemetry Collector with the Prometheus receiver can scrape
+the metrics from this endpoint.
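+
+If you prefer the push-based OTLP path mentioned in the note above, the
+following sketch sends metrics straight to Prometheus' OTLP receiver instead of
+exposing a scrape endpoint. It assumes the `opentelemetry-exporter-otlp`
+artifact is on the classpath and that `resource` is defined as in the previous
+snippet; note that the exporter needs the full signal path, so `/v1/metrics` is
+appended to the `/api/v1/otlp` prefix:
+
+```java
+import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
+
+// Push metrics to Prometheus' OTLP receiver rather than being scraped.
+OtlpHttpMetricExporter otlpExporter = OtlpHttpMetricExporter.builder()
+    .setEndpoint("http://localhost:9090/api/v1/otlp/v1/metrics")
+    .build();
+
+SdkMeterProvider sdkMeterProvider = SdkMeterProvider.builder()
+    .registerMetricReader(PeriodicMetricReader.builder(otlpExporter).build())
+    .setResource(resource)
+    .build();
+```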
+
+## Zipkin
+
+### Backend Setup {#zipkin-setup}
+
+{{% alert title="Note" color="info" %}}
+
+If you have Zipkin or a Zipkin-compatible backend already set up, you can skip
+this section and set up the [Zipkin exporter dependencies](#zipkin-dependencies)
+for your application.
+
+{{% /alert %}}
+
+You can run [Zipkin](https://zipkin.io/) in a Docker container by executing
+the following command:
+
+```shell
+docker run --rm -d -p 9411:9411 --name zipkin openzipkin/zipkin
+```
+
+### Dependencies {#zipkin-dependencies}
+
+To send your trace data to [Zipkin](https://zipkin.io/), you can use the
+`ZipkinSpanExporter`.
+
+Install the
+[`opentelemetry-exporter-zipkin`](https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-zipkin/latest)
+artifact as a dependency for your application:
+
+{{< tabpane text=true >}} {{% tab Gradle %}}
+
+```kotlin
+dependencies {
+  implementation 'io.opentelemetry:opentelemetry-exporter-zipkin:{{% param vers.otel %}}'
+}
+```
+
+{{% /tab %}} {{% tab Maven %}}
+
+```xml
+<project>
+  <dependencies>
+    <dependency>
+      <groupId>io.opentelemetry</groupId>
+      <artifactId>opentelemetry-exporter-zipkin</artifactId>
+    </dependency>
+  </dependencies>
+</project>
+```
+
+{{< /tab >}} {{< /tabpane>}}
+
+Update your OpenTelemetry configuration to use the exporter and to send data to
+your Zipkin backend:
+
+```java
+import io.opentelemetry.exporter.zipkin.ZipkinSpanExporter;
+
+SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder()
+  .addSpanProcessor(BatchSpanProcessor.builder(ZipkinSpanExporter.builder().setEndpoint("http://localhost:9411/api/v2/spans").build()).build())
+  .setResource(resource)
+  .build();
+```
+
+## Other available exporters
+
+There are many other exporters available. For a list of available exporters, see
+the [registry](/ecosystem/registry/?component=exporter&language=java).
+
+Finally, you can also write your own exporter. For more information, see the
+[SpanExporter Interface in the API documentation](https://javadoc.io/doc/io.opentelemetry/opentelemetry-sdk-trace/latest/io/opentelemetry/sdk/trace/export/SpanExporter.html).
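+
+To sketch what implementing that interface involves, a custom `SpanExporter`
+only needs to implement `export`, `flush`, and `shutdown`. The class below is
+purely illustrative; it is not an official exporter and its name is made up:
+
+```java
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
+import io.opentelemetry.sdk.trace.export.SpanExporter;
+
+import java.util.Collection;
+
+// Illustrative exporter that prints span names and trace IDs to stdout.
+public class StdoutNamesSpanExporter implements SpanExporter {
+
+  @Override
+  public CompletableResultCode export(Collection<SpanData> spans) {
+    for (SpanData span : spans) {
+      System.out.println("span: " + span.getName() + " traceId: " + span.getTraceId());
+    }
+    return CompletableResultCode.ofSuccess();
+  }
+
+  @Override
+  public CompletableResultCode flush() {
+    // Nothing is buffered in this sketch, so there is nothing to flush.
+    return CompletableResultCode.ofSuccess();
+  }
+
+  @Override
+  public CompletableResultCode shutdown() {
+    return CompletableResultCode.ofSuccess();
+  }
+}
+```
+
+Such an exporter can then be plugged into a processor just like the built-in
+ones, for example with `SimpleSpanProcessor.create(new StdoutNamesSpanExporter())`.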
+ +## Batching spans and log records + +For traces the OpenTelemetry SDK provides a set of default span and log record +processors, that allow you to either emit them one-by-one ("simple") or batched: + +{{< tabpane text=true >}} {{% tab Batch %}} + +```java +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.logs.export.BatchLogRecordProcessor; + +SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + .addSpanProcessor(BatchSpanProcessor.builder(...).build()) + .setResource(resource) + .build(); + +SdkLoggerProvider sdkLoggerProvider = SdkLoggerProvider.builder() + .addLogRecordProcessor( + BatchLogRecordProcessor.builder(...).build()) + .setResource(resource) + .build(); +``` + +{{% /tab %}} {{% tab Simple %}} + +```java +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.sdk.logs.export.SimpleLogRecordProcessor; + +SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.builder(...).build()) + .setResource(resource) + .build(); + +SdkLoggerProvider sdkLoggerProvider = SdkLoggerProvider.builder() + .addLogRecordProcessor( + SimpleLogRecordProcessor.builder(...).build()) + .setResource(resource) + .build(); +``` + +{{< /tab >}} {{< /tabpane>}} diff --git a/content/en/docs/instrumentation/js/exporters.md b/content/en/docs/instrumentation/js/exporters.md index e2c11acdb585..8a36a0166ca4 100644 --- a/content/en/docs/instrumentation/js/exporters.md +++ b/content/en/docs/instrumentation/js/exporters.md @@ -109,7 +109,7 @@ JavaScript) from the [Getting Started](/docs/instrumentation/js/getting-started/nodejs/) like the following to export traces and metrics via OTLP (`http/protobuf`) : -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Typescript %}} +{{< tabpane text=true >}} {{% tab Typescript %}} ```ts /*instrumentation.ts*/ @@ -374,7 +374,7 @@ npm install --save @opentelemetry/exporter-prometheus Update your OpenTelemetry configuration to use the exporter and to send data to your Prometheus backend: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Typescript %}} +{{< tabpane text=true >}} {{% tab Typescript %}} ```ts import * as opentelemetry from '@opentelemetry/sdk-node'; @@ -449,7 +449,7 @@ npm install --save @opentelemetry/exporter-zipkin Update your OpenTelemetry configuration to use the exporter and to send data to your Zipkin backend: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Typescript %}} +{{< tabpane text=true >}} {{% tab Typescript %}} ```ts import * as opentelemetry from '@opentelemetry/sdk-node'; @@ -495,7 +495,7 @@ allow you to either emit spans one-by-one or batched. If not specified otherwise the SDK will use the `BatchSpanProcessor`. 
If you do not want to batch your spans, you can use the `SimpleSpanProcessor` instead as follows: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab Typescript %}} +{{< tabpane text=true >}} {{% tab Typescript %}} ```ts /*instrumentation.ts*/ diff --git a/content/en/docs/instrumentation/js/getting-started/nodejs.md b/content/en/docs/instrumentation/js/getting-started/nodejs.md index 675a3a6e0dce..5c1e96e43ec1 100644 --- a/content/en/docs/instrumentation/js/getting-started/nodejs.md +++ b/content/en/docs/instrumentation/js/getting-started/nodejs.md @@ -70,7 +70,7 @@ npm install express Create a file named `app.ts` (or `app.js` if not using TypeScript) and add the following code to it: -{{% tabpane text=true langEqualsHeader=true %}} {{% tab TypeScript %}} +{{% tabpane text=true %}} {{% tab TypeScript %}} ```ts /*app.ts*/ @@ -171,7 +171,7 @@ application code. One tool commonly used for this task is the Create a file named `instrumentation.ts` (or `instrumentation.js` if not using TypeScript) , which will contain your instrumentation setup code. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*instrumentation.ts*/ @@ -484,7 +484,7 @@ If you'd like to explore a more complex example, take a look at the Did something go wrong? You can enable diagnostic logging to validate that OpenTelemetry is initialized correctly: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*instrumentation.ts*/ diff --git a/content/en/docs/instrumentation/js/libraries.md b/content/en/docs/instrumentation/js/libraries.md index b7907b56f636..23eedf434c21 100644 --- a/content/en/docs/instrumentation/js/libraries.md +++ b/content/en/docs/instrumentation/js/libraries.md @@ -251,17 +251,19 @@ with a request hook: {{% tab TypeScript %}} ```typescript +import { Span } from '@opentelemetry/api'; import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions'; import { ExpressInstrumentation, ExpressLayerType, + ExpressRequestInfo, } from '@opentelemetry/instrumentation-express'; const expressInstrumentation = new ExpressInstrumentation({ requestHook: function (span: Span, info: ExpressRequestInfo) { if (info.layerType === ExpressLayerType.REQUEST_HANDLER) { - span.setAttribute([SemanticAttributes.HTTP_METHOD], info.request.method); - span.setAttribute([SemanticAttributes.HTTP_URL], info.request.baseUrl); + span.setAttribute(SemanticAttributes.HTTP_METHOD, info.request.method); + span.setAttribute(SemanticAttributes.HTTP_URL, info.request.baseUrl); } }, }); @@ -282,8 +284,8 @@ const { const expressInstrumentation = new ExpressInstrumentation({ requestHook: function (span, info) { if (info.layerType === ExpressLayerType.REQUEST_HANDLER) { - span.setAttribute([SemanticAttributes.HTTP_METHOD], info.request.method); - span.setAttribute([SemanticAttributes.HTTP_URL], info.request.baseUrl); + span.setAttribute(SemanticAttributes.HTTP_METHOD, info.request.method); + span.setAttribute(SemanticAttributes.HTTP_URL, info.request.baseUrl); } }, }); diff --git a/content/en/docs/instrumentation/js/manual.md b/content/en/docs/instrumentation/js/manual.md index 334d6656e8f7..07ecb768a9d4 100644 --- a/content/en/docs/instrumentation/js/manual.md +++ b/content/en/docs/instrumentation/js/manual.md @@ -71,7 +71,7 @@ imported as a dependency by the _app file_. 
Create the _library file_ named `dice.ts` (or `dice.js` if you are not using TypeScript) and add the following code to it: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*dice.ts*/ @@ -112,7 +112,7 @@ module.exports = { rollTheDice }; Create the _app file_ named `app.ts` (or `app.js` if not using TypeScript) and add the following code to it: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*app.ts*/ @@ -212,7 +212,7 @@ SDK. If you fail to initialize the SDK or initialize it too late, no-op implementations will be provided to any library that acquires a tracer or meter from the API. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*instrumentation.ts*/ @@ -344,7 +344,7 @@ npm install @opentelemetry/sdk-trace-web Next, update `instrumentation.ts` (or `instrumentation.js`) to contain all the SDK initialization code in it: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import { Resource } from '@opentelemetry/resources'; @@ -434,7 +434,7 @@ In most cases, stick with `BatchSpanProcessor` over `SimpleSpanProcessor`. Anywhere in your application where you write manual tracing code should call `getTracer` to acquire a tracer. For example: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry from '@opentelemetry/api'; @@ -480,7 +480,7 @@ tracer may be acquired with an appropriate Instrumentation Scope: First, in the _application file_ `app.ts` (or `app.js`): -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*app.ts*/ @@ -542,7 +542,7 @@ app.listen(PORT, () => { And second, in the _library file_ `dice.ts` (or `dice.js`): -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts /*dice.ts*/ @@ -608,7 +608,7 @@ care of setting the span and its context active. The code below illustrates how to create an active span. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import { trace, Span } from '@opentelemetry/api'; @@ -695,7 +695,7 @@ nested in nature. For example, the `rollOnce()` function below represents a nested operation. The following sample creates a nested span that tracks `rollOnce()`: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts function rollOnce(i: number, min: number, max: number) { @@ -837,7 +837,7 @@ const span = opentelemetry.trace.getSpan(ctx); pairs to a [`Span`](/docs/concepts/signals/traces/#spans) so it carries more information about the current operation that it's tracking. 
-{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts function rollOnce(i: number, min: number, max: number) { @@ -885,7 +885,7 @@ tracer.startActiveSpan( ); ``` -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts function rollTheDice(rolls: number, min: number, max: number) { @@ -931,7 +931,7 @@ npm install --save @opentelemetry/semantic-conventions Add the following to the top of your application file: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import { SemanticAttributes } from '@opentelemetry/semantic-conventions'; @@ -1017,7 +1017,7 @@ typically used to specify that a span has not completed successfully - The status can be set at any time before the span is finished: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry, { SpanStatusCode } from '@opentelemetry/api'; @@ -1071,7 +1071,7 @@ explicitly tracking an error. It can be a good idea to record exceptions when they happen. It's recommended to do this in conjunction with setting [span status](#span-status). -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry, { SpanStatusCode } from '@opentelemetry/api'; @@ -1114,7 +1114,7 @@ nested spans. Initializing tracing is similar to how you'd do it with Node.js or the Web SDK. -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry from '@opentelemetry/api'; @@ -1264,7 +1264,7 @@ If you have not created it for tracing already, create a separate `instrumentation.ts` (or `instrumentation.js`) file that has all the SDK initialization code in it: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry from '@opentelemetry/api'; @@ -1363,7 +1363,7 @@ Now that a `MeterProvider` is configured, you can acquire a `Meter`. Anywhere in your application where you have manually instrumented code you can call `getMeter` to acquire a meter. For example: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import opentelemetry from '@opentelemetry/api'; @@ -1455,7 +1455,7 @@ Histograms are used to measure a distribution of values over time. For example, here's how you report a distribution of response times for an API route with Express: -{{< tabpane text=true langEqualsHeader=true >}} {{% tab TypeScript %}} +{{< tabpane text=true >}} {{% tab TypeScript %}} ```ts import express from 'express'; diff --git a/content/en/docs/instrumentation/net/automatic/getting-started.md b/content/en/docs/instrumentation/net/automatic/getting-started.md new file mode 100644 index 000000000000..ddf5b52a7372 --- /dev/null +++ b/content/en/docs/instrumentation/net/automatic/getting-started.md @@ -0,0 +1,314 @@ +--- +title: Getting Started +description: Get telemetry for your app in less than 5 minutes! +cSpell:ignore: ASPNETCORE rolldice +weight: 5 +--- + +This page will show you how to get started with OpenTelemetry .NET Automatic +Instrumentation. 
+ +If you are looking for a way to manually instrument your application, check out +[this guide](/docs/instrumentation/net/getting-started). + +You will learn how you can instrument a simple .NET application automatically, +in such a way that [traces][], [metrics][] and [logs][] are emitted to the +console. + +## Prerequisites + +Ensure that you have the following installed locally: + +- [.NET SDK](https://dotnet.microsoft.com/download/dotnet) 6+ + +## Example Application + +The following example uses a basic +[Minimal API with ASP.NET Core](https://learn.microsoft.com/aspnet/core/tutorials/min-web-api) +application. If you are not using ASP.NET Core, that's OK — you can still use +OpenTelemetry .NET Automatic Instrumentation. + +For more elaborate examples, see +[examples](/docs/instrumentation/net/examples/). + +### Create and launch an HTTP Server + +To begin, set up an environment in a new directory called `dotnet-simple`. +Within that directory, execute following command: + +```sh +dotnet new web +``` + +In the same directory, replace the content of `Program.cs` with the following +code: + +```csharp +using System.Globalization; + +var builder = WebApplication.CreateBuilder(args); +var app = builder.Build(); + +var logger = app.Logger; + +int RollDice() +{ + return Random.Shared.Next(1, 7); +} + +string HandleRollDice(string? player) +{ + var result = RollDice(); + + if (string.IsNullOrEmpty(player)) + { + logger.LogInformation("Anonymous player is rolling the dice: {result}", result); + } + else + { + logger.LogInformation("{player} is rolling the dice: {result}", player, result); + } + + return result.ToString(CultureInfo.InvariantCulture); +} + +app.MapGet("/rolldice/{player?}", HandleRollDice); + +app.Run(); +``` + +In the `Properties` subdirectory, replace the content of `launchSettings.json` +with the following: + +```json +{ + "$schema": "http://json.schemastore.org/launchsettings.json", + "profiles": { + "http": { + "commandName": "Project", + "dotnetRunMessages": true, + "launchBrowser": true, + "applicationUrl": "http://localhost:8080", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + } + } +} +``` + +Build and run the application with the following command, then open + in your web browser to ensure it is working. + +```sh +dotnet build +dotnet run +``` + +## Instrumentation + +Next, you'll use a [OpenTelemetry .NET Automatic Instrumentation](../) to +instrument the application at launch time. While you can [configure .NET +Automatic Instrumentation][] in a number of ways, the steps below use Unix-shell +or PowerShell scripts. + +> **Note**: PowerShell commands require elevated (administrator) privileges. + +1. Download installation scripts from [Releases][] of the + `opentelemetry-dotnet-instrumentation` repository: + + {{< tabpane text=true >}} {{% tab Unix-shell %}} + + ```sh + curl -L -O https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/releases/latest/download/otel-dotnet-auto-install.sh + ``` + + {{% /tab %}} {{% tab PowerShell - Windows %}} + + ```powershell + $module_url = "https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/releases/latest/download/OpenTelemetry.DotNet.Auto.psm1" + $download_path = Join-Path $env:temp "OpenTelemetry.DotNet.Auto.psm1" + Invoke-WebRequest -Uri $module_url -OutFile $download_path -UseBasicParsing + ``` + + {{% /tab %}} {{< /tabpane >}} + +2. 
Execute following script to download automatic instrumentation for your + development environment: + + {{< tabpane text=true >}} {{% tab Unix-shell %}} + + ```sh + ./otel-dotnet-auto-install.sh + ``` + + {{% /tab %}} {{% tab PowerShell - Windows %}} + + ```powershell + Import-Module $download_path + Install-OpenTelemetryCore + ``` + + {{% /tab %}} {{< /tabpane >}} + +3. Set and export variables that specify a [console exporter][], then execute + script configuring other necessary environment variables using a notation + suitable for your shell/terminal environment — we illustrate a notation + for bash-like shells and PowerShell: + + {{< tabpane text=true >}} {{% tab Unix-shell %}} + + ```sh + export OTEL_TRACES_EXPORTER=none \ + OTEL_METRICS_EXPORTER=none \ + OTEL_LOGS_EXPORTER=none \ + OTEL_DOTNET_AUTO_TRACES_CONSOLE_EXPORTER_ENABLED=true \ + OTEL_DOTNET_AUTO_METRICS_CONSOLE_EXPORTER_ENABLED=true \ + OTEL_DOTNET_AUTO_LOGS_CONSOLE_EXPORTER_ENABLED=true + OTEL_SERVICE_NAME=RollDiceService + . $HOME/.otel-dotnet-auto/instrument.sh + ``` + + {{% /tab %}} {{% tab PowerShell - Windows %}} + + ```powershell + $env:OTEL_TRACES_EXPORTER="none" + $env:OTEL_METRICS_EXPORTER="none" + $env:OTEL_LOGS_EXPORTER="none" + $env:OTEL_DOTNET_AUTO_TRACES_CONSOLE_EXPORTER_ENABLED="true" + $env:OTEL_DOTNET_AUTO_METRICS_CONSOLE_EXPORTER_ENABLED="true" + $env:OTEL_DOTNET_AUTO_LOGS_CONSOLE_EXPORTER_ENABLED="true" + Register-OpenTelemetryForCurrentSession -OTelServiceName "RollDiceService" + ``` + + {{% /tab %}} {{< /tabpane >}} + +4. Run your **application** once again: + + ```sh + dotnet run + ``` + + Note the output from the `dotnet run`. + +5. From _another_ terminal, send a request using `curl`: + + ```sh + curl localhost:8080/rolldice + ``` + +6. After about 30 sec, stop the server process. + +At this point, you should see trace and log output from the server and client +that looks something like this (output is line-wrapped for readability): + +
+Traces and Logs + +```log +LogRecord.Timestamp: 2023-08-14T06:44:53.9279186Z +LogRecord.TraceId: 3961d22b5f90bf7662ad4933318743fe +LogRecord.SpanId: 93d5fcea422ff0ac +LogRecord.TraceFlags: Recorded +LogRecord.CategoryName: simple-dotnet +LogRecord.LogLevel: Information +LogRecord.StateValues (Key:Value): + result: 1 + OriginalFormat (a.k.a Body): Anonymous player is rolling the dice: {result} + +Resource associated with LogRecord: +service.name: simple-dotnet +telemetry.auto.version: 0.7.0 +telemetry.sdk.name: opentelemetry +telemetry.sdk.language: dotnet +telemetry.sdk.version: 1.4.0.802 + +info: simple-dotnet[0] + Anonymous player is rolling the dice: 1 +Activity.TraceId: 3961d22b5f90bf7662ad4933318743fe +Activity.SpanId: 93d5fcea422ff0ac +Activity.TraceFlags: Recorded +Activity.ActivitySourceName: OpenTelemetry.Instrumentation.AspNetCore +Activity.DisplayName: /rolldice +Activity.Kind: Server +Activity.StartTime: 2023-08-14T06:44:53.9278162Z +Activity.Duration: 00:00:00.0049754 +Activity.Tags: + net.host.name: localhost + net.host.port: 8080 + http.method: GET + http.scheme: http + http.target: /rolldice + http.url: http://localhost:8080/rolldice + http.flavor: 1.1 + http.user_agent: curl/8.0.1 + http.status_code: 200 +Resource associated with Activity: + service.name: simple-dotnet + telemetry.auto.version: 0.7.0 + telemetry.sdk.name: opentelemetry + telemetry.sdk.language: dotnet + telemetry.sdk.version: 1.4.0.802 +``` + +
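The `LogRecord` and the `Activity` above carry the same trace ID and span ID, which is what allows a backend to correlate the log line with the request span. To also see the named-player variant of the log line, call the route with a player segment — the name is arbitrary; `Alice` is used here only as an example:

```sh
curl localhost:8080/rolldice/Alice
```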
+
+Also, when stopping the server, you should see output of all the metrics
+collected (sample excerpt shown):
+
+
+Metrics + +```log +Export process.runtime.dotnet.gc.collections.count, Number of garbage collections that have occurred since process start., Meter: OpenTelemetry.Instrumentation.Runtime/1.1.0.2 +(2023-08-14T06:12:05.8500776Z, 2023-08-14T06:12:23.7750288Z] generation: gen2 LongSum +Value: 2 +(2023-08-14T06:12:05.8500776Z, 2023-08-14T06:12:23.7750288Z] generation: gen1 LongSum +Value: 2 +(2023-08-14T06:12:05.8500776Z, 2023-08-14T06:12:23.7750288Z] generation: gen0 LongSum +Value: 6 + +... + +Export http.client.duration, Measures the duration of outbound HTTP requests., Unit: ms, Meter: OpenTelemetry.Instrumentation.Http/1.0.0.0 +(2023-08-14T06:12:06.2661140Z, 2023-08-14T06:12:23.7750388Z] http.flavor: 1.1 http.method: POST http.scheme: https http.status_code: 200 net.peer.name: dc.services.visualstudio.com Histogram +Value: Sum: 1330.4766000000002 Count: 5 Min: 50.0333 Max: 465.7936 +(-Infinity,0]:0 +(0,5]:0 +(5,10]:0 +(10,25]:0 +(25,50]:0 +(50,75]:2 +(75,100]:0 +(100,250]:0 +(250,500]:3 +(500,750]:0 +(750,1000]:0 +(1000,2500]:0 +(2500,5000]:0 +(5000,7500]:0 +(7500,10000]:0 +(10000,+Infinity]:0 +``` + +
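Once the console output looks right, a natural next step is to send the same telemetry to an OpenTelemetry Collector instead. The snippet below is only a sketch for bash-like shells and assumes a local Collector listening on the default OTLP/HTTP port (`4318`); set these variables before sourcing `instrument.sh` and running the app again, and see [Configuration and settings](../config) for the full set of options:

```sh
# Assumption: a Collector is reachable at localhost:4318 (the default OTLP/HTTP port).
export OTEL_TRACES_EXPORTER=otlp \
  OTEL_METRICS_EXPORTER=otlp \
  OTEL_LOGS_EXPORTER=otlp \
  OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 \
  OTEL_DOTNET_AUTO_TRACES_CONSOLE_EXPORTER_ENABLED=false \
  OTEL_DOTNET_AUTO_METRICS_CONSOLE_EXPORTER_ENABLED=false \
  OTEL_DOTNET_AUTO_LOGS_CONSOLE_EXPORTER_ENABLED=false
```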
+ +## What next? + +For more: + +- To configure exporters, samplers, resources and more, see + [Configuration and settings](../config) +- See the list of [available instrumentations](../instrumentations) +- If you want to combine automatic and manual instrumentation, learn how you + [can create custom traces and metrics](../custom) +- If you face any issues, check the [Troubleshooting Guide](../troubleshooting) + +[traces]: /docs/concepts/signals/traces/ +[metrics]: /docs/concepts/signals/metrics/ +[logs]: /docs/concepts/signals/logs/ +[configure .NET Automatic Instrumentation]: ../config +[console exporter]: + https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/blob/main/docs/config.md#internal-logs +[releases]: + https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/releases diff --git a/content/en/docs/instrumentation/net/automatic/instrumentations.md b/content/en/docs/instrumentation/net/automatic/instrumentations.md index 8f17cbfd5905..7804c030859a 100644 --- a/content/en/docs/instrumentation/net/automatic/instrumentations.md +++ b/content/en/docs/instrumentation/net/automatic/instrumentations.md @@ -41,35 +41,39 @@ is the case-sensitive name of the instrumentation. stable, but particular instrumentation are in Experimental status due to lack of stable semantic convention. -| ID | Instrumented library | Supported versions | Instrumentation type | Status | -| --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -------------------- | --------------------------------------------------------- | -| `ASPNET` | ASP.NET (.NET Framework) MVC / WebApi \[1\] **Not supported on .NET** | \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `ASPNETCORE` | ASP.NET Core **Not supported on .NET Framework** | \* | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `AZURE` | [Azure SDK](https://azure.github.io/azure-sdk/releases/latest/index.html) | \[2\] | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `ELASTICSEARCH` | [Elastic.Clients.Elasticsearch](https://www.nuget.org/packages/Elastic.Clients.Elasticsearch) | ≥8.0.0 & < 8.10.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `ELASTICTRANSPORT` | [Elastic.Transport](https://www.nuget.org/packages/Elastic.Transport) | ≥0.4.16 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `ENTITYFRAMEWORKCORE` | [Microsoft.EntityFrameworkCore](https://www.nuget.org/packages/) **Not supported on .NET Framework** | ≥6.0.12 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `GRAPHQL` | [GraphQL](https://www.nuget.org/packages/GraphQL) **Not supported on .NET Framework** | ≥7.5.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `GRPCNETCLIENT` | [Grpc.Net.Client](https://www.nuget.org/packages/Grpc.Net.Client) | ≥2.52.0 & < 3.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `HTTPCLIENT` | [System.Net.Http.HttpClient](https://docs.microsoft.com/dotnet/api/system.net.http.httpclient) and [System.Net.HttpWebRequest](https://docs.microsoft.com/dotnet/api/system.net.httpwebrequest) | \* | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `QUARTZ` | [Quartz](https://www.nuget.org/packages/Quartz) **Not supported on .NET Framework 
4.7.1 and older** | ≥3.4.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `MASSTRANSIT` | [MassTransit](https://www.nuget.org/packages/MassTransit) **Not supported on .NET Framework** | ≥8.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `MONGODB` | [MongoDB.Driver.Core](https://www.nuget.org/packages/MongoDB.Driver.Core) | ≥2.13.3 & < 3.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `MYSQLCONNECTOR` | [MySqlConnector](https://www.nuget.org/packages/MySqlConnector) | ≥2.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `MYSQLDATA` | [MySql.Data](https://www.nuget.org/packages/MySql.Data) **Not supported on .NET Framework** | ≥8.1.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `NPGSQL` | [Npgsql](https://www.nuget.org/packages/Npgsql) | ≥6.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `NSERVICEBUS` | [NServiceBus](https://www.nuget.org/packages/NServiceBus) | ≥8.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `SQLCLIENT` | [Microsoft.Data.SqlClient](https://www.nuget.org/packages/Microsoft.Data.SqlClient) and [System.Data.SqlClient](https://www.nuget.org/packages/System.Data.SqlClient) | \* \[3\] | source | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `STACKEXCHANGEREDIS` | [StackExchange.Redis](https://www.nuget.org/packages/StackExchange.Redis) **Not supported on .NET Framework** | ≥2.0.405 < 3.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `WCFCLIENT` | WCF | \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | -| `WCFSERVICE` | WCF **Not supported on .NET**. 
| \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| ID | Instrumented library | Supported versions | Instrumentation type | Status | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------ | -------------------- | --------------------------------------------------------- | +| `ASPNET` | ASP.NET (.NET Framework) MVC / WebApi \[1\] **Not supported on .NET** | \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `ASPNETCORE` | ASP.NET Core **Not supported on .NET Framework** | \* | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `AZURE` | [Azure SDK](https://azure.github.io/azure-sdk/releases/latest/index.html) | \[2\] | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `ELASTICSEARCH` | [Elastic.Clients.Elasticsearch](https://www.nuget.org/packages/Elastic.Clients.Elasticsearch) | \[3\] | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `ELASTICTRANSPORT` | [Elastic.Transport](https://www.nuget.org/packages/Elastic.Transport) | ≥0.4.16 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `ENTITYFRAMEWORKCORE` | [Microsoft.EntityFrameworkCore](https://www.nuget.org/packages/) **Not supported on .NET Framework** | ≥6.0.12 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `GRAPHQL` | [GraphQL](https://www.nuget.org/packages/GraphQL) **Not supported on .NET Framework** | ≥7.5.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `GRPCNETCLIENT` | [Grpc.Net.Client](https://www.nuget.org/packages/Grpc.Net.Client) | ≥2.52.0 & < 3.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `HTTPCLIENT` | [System.Net.Http.HttpClient](https://docs.microsoft.com/dotnet/api/system.net.http.httpclient) and [System.Net.HttpWebRequest](https://docs.microsoft.com/dotnet/api/system.net.httpwebrequest) | \* | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `QUARTZ` | [Quartz](https://www.nuget.org/packages/Quartz) **Not supported on .NET Framework 4.7.1 and older** | ≥3.4.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `MASSTRANSIT` | [MassTransit](https://www.nuget.org/packages/MassTransit) **Not supported on .NET Framework** | ≥8.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `MONGODB` | [MongoDB.Driver.Core](https://www.nuget.org/packages/MongoDB.Driver.Core) | ≥2.13.3 & < 3.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `MYSQLCONNECTOR` | [MySqlConnector](https://www.nuget.org/packages/MySqlConnector) | ≥2.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `MYSQLDATA` | [MySql.Data](https://www.nuget.org/packages/MySql.Data) **Not supported on .NET Framework** | ≥8.1.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `NPGSQL` | [Npgsql](https://www.nuget.org/packages/Npgsql) | ≥6.0.0 | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `NSERVICEBUS` | [NServiceBus](https://www.nuget.org/packages/NServiceBus) | ≥8.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `SQLCLIENT` | 
[Microsoft.Data.SqlClient](https://www.nuget.org/packages/Microsoft.Data.SqlClient), [System.Data.SqlClient](https://www.nuget.org/packages/System.Data.SqlClient) and `System.Data` (shipped with .NET Framework) | \* \[4\] | source | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `STACKEXCHANGEREDIS` | [StackExchange.Redis](https://www.nuget.org/packages/StackExchange.Redis) **Not supported on .NET Framework** | ≥2.0.405 < 3.0.0 | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `WCFCLIENT` | WCF | \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | +| `WCFSERVICE` | WCF **Not supported on .NET**. | \* | source & bytecode | [Experimental](/docs/specs/otel/versioning-and-stability) | \[1\]: Only integrated pipeline mode is supported. \[2\]: `Azure.` prefixed packages, released after October 1, 2021. -\[3\]: Microsoft.Data.SqlClient v3.\* is not supported on .NET Framework, due to -[issue](https://github.com/open-telemetry/opentelemetry-dotnet/issues/4243). +\[3\]: `Elastic.Clients.Elasticsearch` version ≥8.0.0 and <8.10.0. Version +≥8.10.0 is supported by `Elastic.Transport` instrumentation. + +\[4\]: `Microsoft.Data.SqlClient` v3.\* is not supported on .NET Framework, due +to [issue](https://github.com/open-telemetry/opentelemetry-dotnet/issues/4243). +`System.Data.SqlClient` is supported from version 4.8.5. ## Metrics instrumentations diff --git a/content/en/docs/instrumentation/net/getting-started.md b/content/en/docs/instrumentation/net/getting-started.md index 120a7e34d41d..82a86a78f632 100644 --- a/content/en/docs/instrumentation/net/getting-started.md +++ b/content/en/docs/instrumentation/net/getting-started.md @@ -7,6 +7,9 @@ weight: 10 This page will show you how to get started with OpenTelemetry in .NET. +If you are looking for a way to automatically instrument your application, check +out [this guide](/docs/instrumentation/net/automatic/getting-started/). + You will learn how you can instrument a simple .NET application, in such a way that [traces][], [metrics][] and [logs][] are emitted to the console. 
diff --git a/content/en/docs/instrumentation/php/manual.md b/content/en/docs/instrumentation/php/manual.md index 5cc5a7fde021..6f2ccc699705 100644 --- a/content/en/docs/instrumentation/php/manual.md +++ b/content/en/docs/instrumentation/php/manual.md @@ -61,7 +61,7 @@ use OpenTelemetry\Contrib\Otlp\SpanExporter; use OpenTelemetry\SDK\Common\Attribute\Attributes; use OpenTelemetry\SDK\Common\Export\Stream\StreamTransportFactory; use OpenTelemetry\SDK\Logs\LoggerProvider; -use OpenTelemetry\SDK\Logs\Processor\SimpleLogsProcessor; +use OpenTelemetry\SDK\Logs\Processor\SimpleLogRecordProcessor; use OpenTelemetry\SDK\Metrics\MeterProvider; use OpenTelemetry\SDK\Metrics\MetricReader\ExportingReader; use OpenTelemetry\SDK\Resource\ResourceInfo; @@ -111,7 +111,7 @@ $tracerProvider = TracerProvider::builder() $loggerProvider = LoggerProvider::builder() ->setResource($resource) ->addLogRecordProcessor( - new SimpleLogsProcessor($logExporter) + new SimpleLogRecordProcessor($logExporter) ) ->build(); @@ -566,7 +566,7 @@ use OpenTelemetry\API\Logs\LogRecord; use OpenTelemetry\Contrib\Otlp\LogsExporter; use OpenTelemetry\SDK\Common\Export\Stream\StreamTransportFactory; use OpenTelemetry\SDK\Logs\LoggerProvider; -use OpenTelemetry\SDK\Logs\Processor\SimpleLogsProcessor; +use OpenTelemetry\SDK\Logs\Processor\SimpleLogRecordProcessor; use OpenTelemetry\SDK\Resource\ResourceInfoFactory; require 'vendor/autoload.php'; @@ -576,7 +576,7 @@ $exporter = new LogsExporter( ); $loggerProvider = LoggerProvider::builder() - ->addLogRecordProcessor(new SimpleLogsProcessor($exporter)) + ->addLogRecordProcessor(new SimpleLogRecordProcessor($exporter)) ->setResource(ResourceInfoFactory::emptyResource()) ->build(); ``` diff --git a/content/en/docs/instrumentation/python/getting-started.md b/content/en/docs/instrumentation/python/getting-started.md index adfa68030f42..aa2e9ca1a975 100644 --- a/content/en/docs/instrumentation/python/getting-started.md +++ b/content/en/docs/instrumentation/python/getting-started.md @@ -2,14 +2,15 @@ title: Getting Started description: Get telemetry for your app in less than 5 minutes! # prettier-ignore -cSpell:ignore: debugexporter diceroller distro loglevel randint rolldice rollspan venv +cSpell:ignore: debugexporter diceroller distro loglevel maxlen randint rolldice rollspan venv weight: 10 --- This page will show you how to get started with OpenTelemetry in Python. You will learn how you can instrument a simple application automatically, in -such a way that [traces][] and [metrics][] are emitted to the console. +such a way that [traces][], [metrics][], and [logs][] are emitted to the +console. 
## Prerequisites @@ -42,7 +43,7 @@ source ./bin/activate Now install Flask: ```shell -pip3 install 'flask<3' +pip install 'flask<3' ``` ### Create and launch an HTTP Server @@ -51,13 +52,22 @@ Create a file `app.py` and add the following code to it: ```python from random import randint -from flask import Flask +from flask import Flask, request +import logging app = Flask(__name__) +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) @app.route("/rolldice") def roll_dice(): - return str(roll()) + player = request.args.get('player', default = None, type = str) + result = str(roll()) + if player: + logger.warn("{} is rolling the dice: {}", player, result) + else: + logger.warn("Anonymous player is rolling the dice: %s", result) + return result def roll(): return randint(1, 6) @@ -99,9 +109,12 @@ You can now run your instrumented app with `opentelemetry-instrument` and have it print to the console for now: ```shell +export OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true opentelemetry-instrument \ --traces_exporter console \ --metrics_exporter console \ + --logs_exporter console \ + --service_name dice-server \ flask run -p 8080 ``` @@ -114,45 +127,60 @@ as the following: ```json { - "name": "/rolldice", - "context": { - "trace_id": "0xdcd253b9501348b63369d83219da0b14", - "span_id": "0x886c05bc23d2250e", - "trace_state": "[]" - }, - "kind": "SpanKind.SERVER", - "parent_id": null, - "start_time": "2022-04-27T23:53:11.533109Z", - "end_time": "2022-04-27T23:53:11.534097Z", - "status": { - "status_code": "UNSET" - }, - "attributes": { - "http.method": "GET", - "http.server_name": "127.0.0.1", - "http.scheme": "http", - "net.host.port": 5000, - "http.host": "localhost:5000", - "http.target": "/rolldice", - "net.peer.ip": "127.0.0.1", - "http.user_agent": "curl/7.68.0", - "net.peer.port": 52538, - "http.flavor": "1.1", - "http.route": "/rolldice", - "http.status_code": 200 - }, - "events": [], - "links": [], - "resource": { + "name": "/rolldice", + "context": { + "trace_id": "0xdb1fc322141e64eb84f5bd8a8b1c6d1f", + "span_id": "0x5c2b0f851030d17d", + "trace_state": "[]" + }, + "kind": "SpanKind.SERVER", + "parent_id": null, + "start_time": "2023-10-10T08:14:32.630332Z", + "end_time": "2023-10-10T08:14:32.631523Z", + "status": { + "status_code": "UNSET" + }, "attributes": { - "telemetry.sdk.language": "python", - "telemetry.sdk.name": "opentelemetry", - "telemetry.sdk.version": "1.14.0", - "telemetry.auto.version": "0.35b0", - "service.name": "unknown_service" + "http.method": "GET", + "http.server_name": "127.0.0.1", + "http.scheme": "http", + "net.host.port": 8080, + "http.host": "localhost:8080", + "http.target": "/rolldice?rolls=12", + "net.peer.ip": "127.0.0.1", + "http.user_agent": "curl/8.1.2", + "net.peer.port": 58419, + "http.flavor": "1.1", + "http.route": "/rolldice", + "http.status_code": 200 }, - "schema_url": "" - } + "events": [], + "links": [], + "resource": { + "attributes": { + "telemetry.sdk.language": "python", + "telemetry.sdk.name": "opentelemetry", + "telemetry.sdk.version": "1.17.0", + "service.name": "dice-server", + "telemetry.auto.version": "0.38b0" + }, + "schema_url": "" + } +} +{ + "body": "Anonymous player is rolling the dice: 3", + "severity_number": "", + "severity_text": "WARNING", + "attributes": { + "otelSpanID": "5c2b0f851030d17d", + "otelTraceID": "db1fc322141e64eb84f5bd8a8b1c6d1f", + "otelServiceName": "dice-server" + }, + "timestamp": "2023-10-10T08:14:32.631195Z", + "trace_id": "0xdb1fc322141e64eb84f5bd8a8b1c6d1f", + "span_id": 
"0x5c2b0f851030d17d", + "trace_flags": 1, + "resource": "BoundedAttributes({'telemetry.sdk.language': 'python', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.17.0', 'service.name': 'dice-server', 'telemetry.auto.version': '0.38b0'}, maxlen=None)" } ``` @@ -160,6 +188,9 @@ as the following: The generated span tracks the lifetime of a request to the `/rolldice` route. +The log line emitted during the request contains the same trace ID and span ID +and is exported to the console via the log exporter. + Send a few more requests to the endpoint, and then either wait for a little bit or terminate the app and you'll see metrics in the console output, such as the following: @@ -269,12 +300,11 @@ First, modify `app.py` to include code that initializes a tracer and uses it to create a trace that's a child of the one that's automatically generated: ```python -# These are the necessary import declarations -from opentelemetry import trace - from random import randint from flask import Flask +from opentelemetry import trace + # Acquire a tracer tracer = trace.get_tracer("diceroller.tracer") @@ -295,9 +325,12 @@ def roll(): Now run the app again: ```shell +export OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true opentelemetry-instrument \ --traces_exporter console \ --metrics_exporter console \ + --logs_exporter console \ + --service_name dice-server \ flask run -p 8080 ``` @@ -312,19 +345,19 @@ automatically created one: { "name": "roll", "context": { - "trace_id": "0x48da59d77e13beadd1a961dc8fcaa74e", - "span_id": "0x40c38b50bc8da6b7", + "trace_id": "0x6f781c83394ed2f33120370a11fced47", + "span_id": "0x623321c35b8fa837", "trace_state": "[]" }, "kind": "SpanKind.INTERNAL", - "parent_id": "0x84f8c5d92970d94f", - "start_time": "2022-04-28T00:07:55.892307Z", - "end_time": "2022-04-28T00:07:55.892331Z", + "parent_id": "0x09abe52faf1d80d5", + "start_time": "2023-10-10T08:18:28.679261Z", + "end_time": "2023-10-10T08:18:28.679560Z", "status": { "status_code": "UNSET" }, "attributes": { - "roll.value": 4 + "roll.value": "6" }, "events": [], "links": [], @@ -332,9 +365,9 @@ automatically created one: "attributes": { "telemetry.sdk.language": "python", "telemetry.sdk.name": "opentelemetry", - "telemetry.sdk.version": "1.14.0", - "telemetry.auto.version": "0.35b0", - "service.name": "unknown_service" + "telemetry.sdk.version": "1.17.0", + "service.name": "dice-server", + "telemetry.auto.version": "0.38b0" }, "schema_url": "" } @@ -342,14 +375,14 @@ automatically created one: { "name": "/rolldice", "context": { - "trace_id": "0x48da59d77e13beadd1a961dc8fcaa74e", - "span_id": "0x84f8c5d92970d94f", + "trace_id": "0x6f781c83394ed2f33120370a11fced47", + "span_id": "0x09abe52faf1d80d5", "trace_state": "[]" }, "kind": "SpanKind.SERVER", "parent_id": null, - "start_time": "2022-04-28T00:07:55.891500Z", - "end_time": "2022-04-28T00:07:55.892552Z", + "start_time": "2023-10-10T08:18:28.678348Z", + "end_time": "2023-10-10T08:18:28.679677Z", "status": { "status_code": "UNSET" }, @@ -357,12 +390,12 @@ automatically created one: "http.method": "GET", "http.server_name": "127.0.0.1", "http.scheme": "http", - "net.host.port": 5000, - "http.host": "localhost:5000", - "http.target": "/rolldice", + "net.host.port": 8080, + "http.host": "localhost:8080", + "http.target": "/rolldice?rolls=12", "net.peer.ip": "127.0.0.1", - "http.user_agent": "curl/7.68.0", - "net.peer.port": 53824, + "http.user_agent": "curl/8.1.2", + "net.peer.port": 58485, "http.flavor": "1.1", "http.route": "/rolldice", "http.status_code": 
200 @@ -373,9 +406,9 @@ automatically created one: "attributes": { "telemetry.sdk.language": "python", "telemetry.sdk.name": "opentelemetry", - "telemetry.sdk.version": "1.14.0", - "telemetry.auto.version": "0.35b0", - "service.name": "unknown_service" + "telemetry.sdk.version": "1.17.0", + "service.name": "dice-server", + "telemetry.auto.version": "0.38b0" }, "schema_url": "" } @@ -399,8 +432,10 @@ from opentelemetry import trace from opentelemetry import metrics from random import randint -from flask import Flask +from flask import Flask, request +import logging +# Acquire a tracer tracer = trace.get_tracer("diceroller.tracer") # Acquire a meter. meter = metrics.get_meter("diceroller.meter") @@ -412,26 +447,37 @@ roll_counter = meter.create_counter( ) app = Flask(__name__) +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) @app.route("/rolldice") def roll_dice(): - return str(roll()) + # This creates a new span that's the child of the current one + with tracer.start_as_current_span("roll") as roll_span: + player = request.args.get('player', default = None, type = str) + result = str(roll()) + roll_span.set_attribute("roll.value", result) + # This adds 1 to the counter for the given roll value + roll_counter.add(1, {"roll.value": result}) + if player: + logger.warn("{} is rolling the dice: {}", player, result) + else: + logger.warn("Anonymous player is rolling the dice: %s", result) + return result def roll(): - with tracer.start_as_current_span("roll") as rollspan: - res = randint(1, 6) - rollspan.set_attribute("roll.value", res) - # This adds 1 to the counter for the given roll value - roll_counter.add(1, {"roll.value": res}) - return res + return randint(1, 6) ``` Now run the app again: ```shell +export OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true opentelemetry-instrument \ --traces_exporter console \ --metrics_exporter console \ + --logs_exporter console \ + --service_name dice-server \ flask run -p 8080 ``` @@ -449,18 +495,85 @@ emitted to the console, with separate counts for each roll value: "attributes": { "telemetry.sdk.language": "python", "telemetry.sdk.name": "opentelemetry", - "telemetry.sdk.version": "1.12.0rc1", - "telemetry.auto.version": "0.31b0", - "service.name": "unknown_service" + "telemetry.sdk.version": "1.17.0", + "service.name": "dice-server", + "telemetry.auto.version": "0.38b0" }, "schema_url": "" }, "scope_metrics": [ { "scope": { - "name": "app", + "name": "opentelemetry.instrumentation.flask", + "version": "0.38b0", + "schema_url": "" + }, + "metrics": [ + { + "name": "http.server.active_requests", + "description": "measures the number of concurrent HTTP requests that are currently in-flight", + "unit": "requests", + "data": { + "data_points": [ + { + "attributes": { + "http.method": "GET", + "http.host": "localhost:8080", + "http.scheme": "http", + "http.flavor": "1.1", + "http.server_name": "127.0.0.1" + }, + "start_time_unix_nano": 1696926005694857000, + "time_unix_nano": 1696926063549782000, + "value": 0 + } + ], + "aggregation_temporality": 2, + "is_monotonic": false + } + }, + { + "name": "http.server.duration", + "description": "measures the duration of the inbound HTTP request", + "unit": "ms", + "data": { + "data_points": [ + { + "attributes": { + "http.method": "GET", + "http.host": "localhost:8080", + "http.scheme": "http", + "http.flavor": "1.1", + "http.server_name": "127.0.0.1", + "net.host.port": 8080, + "http.status_code": 200 + }, + "start_time_unix_nano": 1696926005695798000, + "time_unix_nano": 
1696926063549782000, + "count": 7, + "sum": 6, + "bucket_counts": [ + 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + "explicit_bounds": [ + 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, + 750.0, 1000.0, 2500.0, 5000.0, 7500.0, 10000.0 + ], + "min": 0, + "max": 1 + } + ], + "aggregation_temporality": 2 + } + } + ], + "schema_url": "" + }, + { + "scope": { + "name": "diceroller.meter", "version": "", - "schema_url": null + "schema_url": "" }, "metrics": [ { @@ -471,42 +584,42 @@ emitted to the console, with separate counts for each roll value: "data_points": [ { "attributes": { - "roll.value": 4 + "roll.value": "5" }, - "start_time_unix_nano": 1654790325350232600, - "time_unix_nano": 1654790332211598800, + "start_time_unix_nano": 1696926005695491000, + "time_unix_nano": 1696926063549782000, "value": 3 }, { "attributes": { - "roll.value": 6 + "roll.value": "6" }, - "start_time_unix_nano": 1654790325350232600, - "time_unix_nano": 1654790332211598800, - "value": 4 + "start_time_unix_nano": 1696926005695491000, + "time_unix_nano": 1696926063549782000, + "value": 1 }, { "attributes": { - "roll.value": 5 + "roll.value": "1" }, - "start_time_unix_nano": 1654790325350232600, - "time_unix_nano": 1654790332211598800, + "start_time_unix_nano": 1696926005695491000, + "time_unix_nano": 1696926063549782000, "value": 1 }, { "attributes": { - "roll.value": 1 + "roll.value": "3" }, - "start_time_unix_nano": 1654790325350232600, - "time_unix_nano": 1654790332211598800, - "value": 2 + "start_time_unix_nano": 1696926005695491000, + "time_unix_nano": 1696926063549782000, + "value": 1 }, { "attributes": { - "roll.value": 3 + "roll.value": "4" }, - "start_time_unix_nano": 1654790325350232600, - "time_unix_nano": 1654790332211598800, + "start_time_unix_nano": 1696926005695491000, + "time_unix_nano": 1696926063549782000, "value": 1 } ], @@ -515,7 +628,7 @@ emitted to the console, with separate counts for each roll value: } } ], - "schema_url": null + "schema_url": "" } ], "schema_url": "" @@ -567,6 +680,10 @@ service: receivers: [otlp] exporters: [debug] processors: [batch] + logs: + receivers: [otlp] + exporters: [debug] + processors: [batch] ``` Then run the docker command to acquire and run the collector based on this @@ -600,7 +717,8 @@ and default to OTLP export when it's run next. Run the application like before, but don't export to the console: ```shell -opentelemetry-instrument flask run -p 8080 +export OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true +opentelemetry-instrument --logs_exporter otlp flask run -p 8080 ``` By default, `opentelemetry-instrument` exports traces and metrics over OTLP/gRPC @@ -710,3 +828,4 @@ If you'd like to explore a more complex example, take a look at the [traces]: /docs/concepts/signals/traces/ [metrics]: /docs/concepts/signals/metrics/ +[logs]: /docs/concepts/signals/logs/ diff --git a/content/en/docs/kubernetes/getting-started.md b/content/en/docs/kubernetes/getting-started.md index 0bc976b3fb0e..4033cebf5d9a 100644 --- a/content/en/docs/kubernetes/getting-started.md +++ b/content/en/docs/kubernetes/getting-started.md @@ -90,7 +90,7 @@ This instance of the collector will use the following components: Let's break these down. -**OTLP Receiver** +### OTLP Receiver The [OTLP Receiver](https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver) @@ -105,7 +105,7 @@ a node to emit their traces, metrics, and logs to a collector running on the same node. 
This keeps network interactions simple and allows easy correlation of Kubernetes metadata using the `k8sattributes` processor. -**Kubernetes Attributes Processor** +### Kubernetes Attributes Processor The [Kubernetes Attributes Processor](../collector/components/#kubernetes-attributes-processor) @@ -117,7 +117,7 @@ Kubernetes context to your telemetry, the Kubernetes Attributes Processor lets you correlate your application's traces, metrics, and logs signals with your Kubernetes telemetry, such as pod metrics and traces. -**Kubeletstats Receiver** +### Kubeletstats Receiver The [Kubeletstats Receiver](../collector/components/#kubeletstats-receiver) is the receiver that gathers metrics about the node. It will gather metrics like @@ -127,7 +127,7 @@ using the Kubernetes Attributes Processor, we'll be able to correlate our application traces, metrics, and logs with the metrics produced by the Kubeletstats Receiver. -**Filelog Receiver** +### Filelog Receiver The [Filelog Receiver](../collector/components/#filelog-receiver) will collect logs written to stdout/stderr by tailing the logs Kubernetes writes to @@ -209,7 +209,7 @@ This instance of the Collector will use the following components: Let's break these down. -**Kubernetes Cluster Receiver** +### Kubernetes Cluster Receiver The [Kubernetes Cluster Receiver](../collector/components/#kubernetes-cluster-receiver) @@ -217,7 +217,7 @@ is the Collector's solution for collecting metrics about the state of the cluster as a whole. This receiver can gather metrics about node conditions, pod phases, container restarts, available and desired deployments, and more. -**Kubernetes Objects Receiver** +### Kubernetes Objects Receiver The [Kubernetes Objects Receiver](../collector/components/#kubernetes-objects-receiver) diff --git a/content/en/docs/kubernetes/helm/operator.md b/content/en/docs/kubernetes/helm/operator.md index 1ec7e17e852a..b82816a4a293 100644 --- a/content/en/docs/kubernetes/helm/operator.md +++ b/content/en/docs/kubernetes/helm/operator.md @@ -23,7 +23,7 @@ following commands: helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts helm install my-opentelemetry-operator open-telemetry/opentelemetry-operator \ --set admissionWebhooks.certManager.enabled=false \ - --set admissionWebhooks.certManager.autoGenerateCert=true + --set admissionWebhooks.certManager.autoGenerateCert.enabled=true ``` This will install an OpenTelemetry Operator with a self-signed certificate and @@ -51,12 +51,12 @@ generate/configure the required TLS certificate. requires the installation of cert-manager. - You can use an automatically generated self-signed certificate by setting `admissionWebhooks.certManager.enabled` to `false` and - `admissionWebhooks.autoGenerateCert` to `true`. Helm will create a self-signed - cert and a secret for you. + `admissionWebhooks.autoGenerateCert.enabled` to `true`. Helm will create a + self-signed cert and a secret for you. - You can use your own generated self-signed certificate by setting both `admissionWebhooks.certManager.enabled` and - `admissionWebhooks.autoGenerateCert` to `false`. You should provide the - necessary values to `admissionWebhooks.cert_file`, + `admissionWebhooks.autoGenerateCert.enabled` to `false`. You should provide + the necessary values to `admissionWebhooks.cert_file`, `admissionWebhooks.key_file`, and `admissionWebhooks.ca_file`. 
- You can side-load custom webhooks and certificate by disabling `.Values.admissionWebhooks.create` and `admissionWebhooks.certManager.enabled` diff --git a/content/en/docs/kubernetes/operator/automatic.md b/content/en/docs/kubernetes/operator/automatic.md index ed88371a6836..eb52b9afe428 100644 --- a/content/en/docs/kubernetes/operator/automatic.md +++ b/content/en/docs/kubernetes/operator/automatic.md @@ -142,7 +142,7 @@ endpoint must be able to receive OTLP over `http/protobuf`. Therefore, the example uses `http://demo-collector:4318`, which will connect to the `http` port of the `otlpreceiver` of the Collector created in the previous step. -**Excluding auto-instrumentation** +#### Excluding auto-instrumentation {#dotnet-excluding-auto-instrumentation} By default, the .NET auto-instrumentation ships with [many instrumentation libraries](https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/blob/main/docs/config.md#instrumentations). @@ -174,7 +174,7 @@ spec: value: false ``` -**Learn more** +#### Learn more {#dotnet-learn-more} For more details, see [.NET Auto Instrumentation docs](/docs/instrumentation/net/automatic/). @@ -208,7 +208,7 @@ must be able to receive OTLP over `grpc`. Therefore, the example uses `http://demo-collector:4317`, which connects to the `grpc` port of the otlpreceiver of the Collector created in the previous step. -**Excluding auto-instrumentation** +#### Excluding auto-instrumentation {#java-excluding-auto-instrumentation} By default, the Java auto-instrumentation ships with [many instrumentation libraries](/docs/instrumentation/java/automatic/#supported-libraries-frameworks-application-services-and-jvms). @@ -244,7 +244,7 @@ spec: value: false ``` -**Learn more** +#### Learn more {#java-learn-more} For more details, see [Java agent Configuration](/docs/instrumentation/java/automatic/agent-config/). @@ -278,7 +278,7 @@ must be able to receive OTLP over `grpc`. Therefore, the example uses `http://demo-collector:4317`, which connects to the `grpc` port of the `otlpreceiver` of the Collector created in the previous step. -**Excluding auto-instrumentation** +#### Excluding auto-instrumentation {#js-excluding-auto-instrumentation} By default, the Node.js auto-instrumentation ships with [many instrumentation libraries](https://github.com/open-telemetry/opentelemetry-js-contrib/blob/main/metapackages/auto-instrumentations-node/README.md#supported-instrumentations). @@ -287,7 +287,7 @@ specific packages. If you don't want to use a package included by the default image you must either supply your own image that includes only the packages you want or use manual instrumentation. -**Learn more** +#### Learn more {#js-learn-more} For more details, see [Node.js auto-instrumentation](/docs/instrumentation/js/libraries/#registration). @@ -328,7 +328,7 @@ in the previous step. > Operator you **MUST** set these env variables to `http/protobuf`, or Python > auto-instrumentation will not work. -**Auto-instrumenting Python logs** +#### Auto-instrumenting Python logs By default, Python logs auto-instrumentation is disabled. If you would like to enable this feature, you must to set the `OTEL_LOGS_EXPORTER` and @@ -359,7 +359,7 @@ spec: > Note that `OTEL_LOGS_EXPORTER` must be explicitly set to `otlp_proto_http`, > otherwise it defaults to gRPC. 
-**Excluding auto-instrumentation** +#### Excluding auto-instrumentation {#python-excluding-auto-instrumentation} By default the Python auto-instrumentation will detect the packages in your Python service and instrument anything it can. This makes instrumentation easy, @@ -389,7 +389,7 @@ spec: instrumentation> ``` -**Learn more** +#### Learn more {#python-learn-more} [See the Python agent Configuration docs for more details.](/docs/instrumentation/python/automatic/agent-config/#disabling-specific-instrumentations) @@ -406,7 +406,7 @@ metadata: name: demo-instrumentation spec: exporter: - endpoint: http://demo-collector:4317 + endpoint: http://demo-collector:4318 propagators: - tracecontext - baggage @@ -417,14 +417,10 @@ EOF ``` By default, the Instrumentation resource that auto-instruments Go services uses -`otlp` with the `grpc` protocol. This means that the configured endpoint must be -able to receive OTLP over `grpc`. Therefore, the example uses -`http://demo-collector:4317`, which connects to the `grpc` port of the -`otlpreceiver` of the Collector created in the previous step. - -> Go auto-instrumentation only supports exporting via gRPC. Setting the protocol -> or exporter to any other value via environment variables will result in silent -> failure. +`otlp` with the `http/protobuf` protocol. This means that the configured +endpoint must be able to receive OTLP over `http/protobuf`. Therefore, the +example uses `http://demo-collector:4318`, which connects to the `http/protobuf` +port of the `otlpreceiver` of the Collector created in the previous step. The Go auto-instrumentation does not support disabling any instrumentation. [See the Go Auto-Instrumentation repository for me details.](https://github.com/open-telemetry/opentelemetry-go-instrumentation) diff --git a/data/ecosystem/integrations.yaml b/data/ecosystem/integrations.yaml index 74912df16b63..6b0b6d45ef3c 100644 --- a/data/ecosystem/integrations.yaml +++ b/data/ecosystem/integrations.yaml @@ -1,4 +1,4 @@ -# cSpell:ignore containerd buildx Quarkus Cerbos flagd flipt KEDA Kyverno Dapr rustup GORM +# cSpell:ignore containerd buildx Quarkus Cerbos flagd flipt KEDA Kyverno Dapr rustup GORM Otterize - name: containerd url: https://containerd.io/ docsUrl: https://github.com/containerd/containerd/blob/main/docs/tracing.md @@ -164,3 +164,13 @@ docsUrl: https://github.com/go-gorm/opentelemetry components: [Go] oss: true +- name: Cloud Foundry + url: https://www.cloudfoundry.org/ + docsUrl: https://github.com/cloudfoundry/cf-deployment/blob/main/operations/experimental/add-otel-collector.yml + components: [Collector] + oss: true +- name: Otterize network mapper + url: https://github.com/otterize/network-mapper + docsUrl: https://docs.otterize.com/reference/configuration/network-mapper/helm-chart#opentelemetry-exporter-parameters + components: [Go] + oss: true diff --git a/data/ecosystem/vendors.yaml b/data/ecosystem/vendors.yaml index 5d7157c8981a..26796cebeb17 100644 --- a/data/ecosystem/vendors.yaml +++ b/data/ecosystem/vendors.yaml @@ -34,6 +34,13 @@ contact: '' oss: false commercial: true +- name: Better Stack + distribution: false + nativeOTLP: true + url: https://betterstack.com/docs/logs/open-telemetry/#2-setup + contact: 'hello@betterstack.com' + oss: false + commercial: true - name: Coralogix distribution: true nativeOTLP: true @@ -91,10 +98,10 @@ oss: false commercial: true - name: Grafana Labs - distribution: false + distribution: true nativeOTLP: true url: 'https://grafana.com/oss/opentelemetry/' - contact: '' + contact: 
'https://github.com/jpkrohling/' oss: true commercial: true - name: Helios diff --git a/hugo.yaml b/hugo.yaml index 6cfa3adac621..9ce69128e1ad 100644 --- a/hugo.yaml +++ b/hugo.yaml @@ -2,7 +2,7 @@ baseURL: https://opentelemetry.io title: &title OpenTelemetry description: &desc The OpenTelemetry Project Site -disableKinds: [taxonomy] +disableKinds: [rss, taxonomy] theme: [docsy] disableAliases: true # We do redirects via Netlify's _redirects file enableGitInfo: true @@ -43,7 +43,7 @@ outputFormats: notAlternative: true outputs: - home: [HTML, REDIRECTS, RSS] + home: [HTML, REDIRECTS] params: copyright: >- diff --git a/package.json b/package.json index 07176160efea..3f55465f3415 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ "gulp": "^4.0.2", "hugo-extended": "0.120.4", "markdown-link-check": "^3.11.2", - "markdownlint": "^0.31.1", + "markdownlint": "^0.32.1", "postcss-cli": "^10.1.0", "prettier": "^3.0.3", "require-dir": "^1.2.0", @@ -101,7 +101,7 @@ }, "dependencies": { "@opentelemetry/api": "^1.3.0", - "@opentelemetry/auto-instrumentations-web": "^0.33.0", + "@opentelemetry/auto-instrumentations-web": "^0.34.0", "@opentelemetry/context-zone": "^1.8.0", "@opentelemetry/core": "^1.8.0", "@opentelemetry/exporter-trace-otlp-http": "^0.45.1", diff --git a/scripts/content-modules/adjust-pages.pl b/scripts/content-modules/adjust-pages.pl index d07f0c75fa6b..a8b6cb5348f1 100755 --- a/scripts/content-modules/adjust-pages.pl +++ b/scripts/content-modules/adjust-pages.pl @@ -21,7 +21,7 @@ my %versions = qw( spec: 1.27.0 otlp: 1.0.0 - semconv: 1.23.0 + semconv: 1.23.1 ); my $otelSpecVers = $versions{'spec:'}; my $otlpSpecVers = $versions{'otlp:'}; diff --git a/static/refcache.json b/static/refcache.json index ded5d0ac1481..b745b0580dd4 100644 --- a/static/refcache.json +++ b/static/refcache.json @@ -191,6 +191,10 @@ "StatusCode": 200, "LastSeen": "2023-06-29T13:39:27.033525-04:00" }, + "https://betterstack.com/docs/logs/open-telemetry/#2-setup": { + "StatusCode": 200, + "LastSeen": "2023-11-30T09:17:13.941735+01:00" + }, "https://blog.logrocket.com/understanding-schema-stitching-graphql/": { "StatusCode": 200, "LastSeen": "2023-06-29T13:40:18.37311-04:00" @@ -1423,6 +1427,10 @@ "StatusCode": 200, "LastSeen": "2023-06-29T16:18:12.959612-04:00" }, + "https://docs.otterize.com/reference/configuration/network-mapper/helm-chart#opentelemetry-exporter-parameters": { + "StatusCode": 206, + "LastSeen": "2023-11-17T16:32:53.695431+01:00" + }, "https://docs.particular.net/samples/open-telemetry/prometheus-grafana/#reporting-metric-values": { "StatusCode": 200, "LastSeen": "2023-09-06T15:04:50.067993+02:00" @@ -1567,6 +1575,10 @@ "StatusCode": 206, "LastSeen": "2023-06-30T09:35:36.578946-04:00" }, + "https://elastic.co/blog/ecs-elastic-common-schema-otel-opentelemetry-announcement": { + "StatusCode": 200, + "LastSeen": "2023-11-29T05:19:30.624412-05:00" + }, "https://en.cppreference.com/w/cpp/container/set": { "StatusCode": 200, "LastSeen": "2023-06-29T16:10:28.50163-04:00" @@ -2179,6 +2191,10 @@ "StatusCode": 200, "LastSeen": "2023-06-30T09:32:35.041502-04:00" }, + "https://github.com/davidgs": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:09.869589-05:00" + }, "https://github.com/dcxp/opentelemetry-kotlin": { "StatusCode": 200, "LastSeen": "2023-06-30T08:35:34.198403-04:00" @@ -2311,6 +2327,10 @@ "StatusCode": 200, "LastSeen": "2023-06-30T08:33:54.718117-04:00" }, + "https://github.com/hossko": { + "StatusCode": 200, + "LastSeen": "2023-11-17T10:59:22.431031Z" + }, 
"https://github.com/imandra-ai/ocaml-opentelemetry/": { "StatusCode": 200, "LastSeen": "2023-06-30T08:35:50.599272-04:00" @@ -2427,6 +2447,10 @@ "StatusCode": 200, "LastSeen": "2023-07-06T11:55:26.882609-07:00" }, + "https://github.com/martinkuba": { + "StatusCode": 200, + "LastSeen": "2023-11-04T11:32:20.86746-04:00" + }, "https://github.com/metrico/otel-collector": { "StatusCode": 200, "LastSeen": "2023-10-17T15:13:11.067528+02:00" @@ -2603,6 +2627,10 @@ "StatusCode": 200, "LastSeen": "2023-07-07T13:45:50.007391-07:00" }, + "https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/workflows/load-tests.yml": { + "StatusCode": 200, + "LastSeen": "2023-11-04T11:32:21.428206-04:00" + }, "https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10116": { "StatusCode": 200, "LastSeen": "2023-06-30T08:43:50.226669-04:00" @@ -3683,6 +3711,22 @@ "StatusCode": 200, "LastSeen": "2023-06-30T09:42:42.642961-04:00" }, + "https://github.com/orishoshan": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:08.644309-05:00" + }, + "https://github.com/otterize": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:10.831177-05:00" + }, + "https://github.com/otterize/network-mapper": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:12.356594-05:00" + }, + "https://github.com/otterize/network-mapper/pull/141": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:13.899173-05:00" + }, "https://github.com/pavolloffay": { "StatusCode": 200, "LastSeen": "2023-06-30T09:26:51.361548-04:00" @@ -4127,6 +4171,22 @@ "StatusCode": 206, "LastSeen": "2023-06-29T16:06:31.115927-04:00" }, + "https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-prometheus/latest": { + "StatusCode": 200, + "LastSeen": "2023-11-17T12:19:45.412666+01:00" + }, + "https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-prometheus/latest/io/opentelemetry/exporter/prometheus/PrometheusHttpServer.html": { + "StatusCode": 200, + "LastSeen": "2023-11-27T22:03:28.673499+01:00" + }, + "https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-zipkin/latest": { + "StatusCode": 200, + "LastSeen": "2023-11-17T12:19:46.0091+01:00" + }, + "https://javadoc.io/doc/io.opentelemetry/opentelemetry-sdk-trace/latest/io/opentelemetry/sdk/trace/export/SpanExporter.html": { + "StatusCode": 200, + "LastSeen": "2023-11-17T12:19:46.216357+01:00" + }, "https://javadoc.io/doc/org.apache.logging.log4j/log4j-api/latest/org.apache.logging.log4j/org/apache/logging/log4j/Logger.html": { "StatusCode": 200, "LastSeen": "2023-08-10T19:39:55.351559+02:00" @@ -4635,10 +4695,18 @@ "StatusCode": 206, "LastSeen": "2023-07-06T12:14:49.802412-07:00" }, + "https://open-telemetry.github.io/opentelemetry-java/benchmarks/": { + "StatusCode": 206, + "LastSeen": "2023-11-04T11:32:21.536067-04:00" + }, "https://open-telemetry.github.io/opentelemetry-js": { "StatusCode": 206, "LastSeen": "2023-06-29T18:46:19.489479-04:00" }, + "https://open-telemetry.github.io/opentelemetry-js/benchmarks/": { + "StatusCode": 206, + "LastSeen": "2023-11-04T11:32:21.613865-04:00" + }, "https://open-telemetry.github.io/opentelemetry-js/benchmarks/data.js": { "StatusCode": 206, "LastSeen": "2023-10-03T11:24:52.148514-07:00" @@ -4687,6 +4755,10 @@ "StatusCode": 206, "LastSeen": "2023-11-03T10:07:17.047111-04:00" }, + "https://opensearch.org/docs/latest/data-prepper/index/": { + "StatusCode": 206, + "LastSeen": "2023-11-17T10:59:20.51098Z" + }, "https://opentelemetry-cpp.readthedocs.io/en/latest/otel_docs/namespace_opentelemetry__metrics.html": { 
"StatusCode": 200, "LastSeen": "2023-06-29T18:48:28.97257-04:00" @@ -4703,6 +4775,10 @@ "StatusCode": 200, "LastSeen": "2023-06-29T18:48:18.447994-04:00" }, + "https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html": { + "StatusCode": 200, + "LastSeen": "2023-11-17T10:59:23.029679Z" + }, "https://opentelemetry-python.readthedocs.io/en/latest/api/metrics.html": { "StatusCode": 200, "LastSeen": "2023-06-29T18:46:08.928256-04:00" @@ -4711,6 +4787,10 @@ "StatusCode": 200, "LastSeen": "2023-06-29T18:45:58.074821-04:00" }, + "https://opentelemetry-python.readthedocs.io/en/latest/examples/logs/README.html": { + "StatusCode": 200, + "LastSeen": "2023-11-17T10:59:24.626088Z" + }, "https://opentelemetry-python.readthedocs.io/en/latest/index.html": { "StatusCode": 200, "LastSeen": "2023-06-29T18:45:52.763289-04:00" @@ -4779,6 +4859,10 @@ "StatusCode": 206, "LastSeen": "2023-06-30T11:41:53.83222-04:00" }, + "https://otterize.com/": { + "StatusCode": 200, + "LastSeen": "2023-11-16T15:02:10.279947-05:00" + }, "https://packagist.org/": { "StatusCode": 200, "LastSeen": "2023-06-30T09:20:47.395106-04:00" @@ -5183,6 +5267,10 @@ "StatusCode": 206, "LastSeen": "2023-06-30T09:40:22.564248-04:00" }, + "https://research.facebook.com/file/877841159827226/holistic-configuration-management-at-facebook.pdf": { + "StatusCode": 206, + "LastSeen": "2023-11-29T05:30:49.447684-05:00" + }, "https://roadrunner.dev/": { "StatusCode": 200, "LastSeen": "2023-07-31T14:07:56.022667827Z" @@ -6343,6 +6431,10 @@ "StatusCode": 206, "LastSeen": "2023-06-30T16:26:28.214741-04:00" }, + "https://www.otelbin.io/": { + "StatusCode": 200, + "LastSeen": "2023-11-17T10:59:19.546115Z" + }, "https://www.outreachy.org/": { "StatusCode": 200, "LastSeen": "2023-06-30T08:52:07.931813-04:00" diff --git a/themes/docsy b/themes/docsy index fd669b752b7f..6bb4f99d1eab 160000 --- a/themes/docsy +++ b/themes/docsy @@ -1 +1 @@ -Subproject commit fd669b752b7f83fb2eb57cacd2c8f334f86ca7d2 +Subproject commit 6bb4f99d1eab4976fb80d1488c81ba12b1715c05