diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 84b2356fe1a..856b2f3a845 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -22,6 +22,7 @@ jobs: py38: 3.8 py39: 3.9 pypy3: pypy3 + RUN_MATRIX_COMBINATION: ${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false # ensures the entire test matrix is run, even if one permutation fails @@ -30,15 +31,6 @@ jobs: package: ["instrumentation", "core", "exporter"] os: [ ubuntu-latest ] include: - - python-version: py39 - package: "tracecontext" - os: ubuntu-latest - - python-version: py39 - package: "mypy" - os: ubuntu-latest - - python-version: py39 - package: "mypyinstalled" - os: ubuntu-latest # py35-instrumentation segfaults on 18.04 so we instead run on 20.04 - python-version: py35 package: instrumentation @@ -67,14 +59,38 @@ jobs: uses: actions/cache@v2 with: path: .tox - key: tox-cache-${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core + key: tox-cache-${{ env.RUN_MATRIX_COMBINATION }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }}-core - name: run tox - run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} + run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} -- --benchmark-json=${{ env.RUN_MATRIX_COMBINATION }}-benchmark.json + - name: Find and merge benchmarks + # TODO: Add at least one benchmark to every package type to remove this + if: matrix.package == 'core' + run: >- + jq -s '.[0].benchmarks = ([.[].benchmarks] | add) + | if .[0].benchmarks == null then null else .[0] end' + opentelemetry-*/tests/*${{ matrix.package }}*-benchmark.json > output.json + - name: Report on benchmark results + # TODO: Add at least one benchmark to every package type to remove this + if: matrix.package == 'core' + uses: rhysd/github-action-benchmark@v1 + with: + name: OpenTelemetry Python Benchmarks - Python ${{ 
env[matrix.python-version] }} - ${{ matrix.package }} tool: pytest output-file-path: output.json github-token: ${{ secrets.GITHUB_TOKEN }} # Alert with a commit comment on possible performance regression alert-threshold: 200% comment-always: true fail-on-alert: true # Make a commit on `gh-pages` with benchmarks from previous step auto-push: ${{ github.ref == 'refs/heads/master' }} gh-pages-branch: master benchmark-data-dir-path: benchmarks misc: strategy: fail-fast: false matrix: - tox-environment: [ "docker-tests", "lint", "docs" ] + tox-environment: [ "docker-tests", "lint", "docs", "mypy", "mypyinstalled", "tracecontext" ] name: ${{ matrix.tox-environment }} runs-on: ubuntu-latest steps: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4981eb5547f..f04808d16a3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,6 +66,31 @@ See [`tox.ini`](https://github.com/open-telemetry/opentelemetry-python/blob/master/tox.ini) for more detail on available tox commands. +### Benchmarks + +Performance progression of benchmarks for packages distributed by OpenTelemetry Python can be viewed as a [graph of throughput vs commit history](https://opentelemetry-python.readthedocs.io/en/latest/benchmarks/index.html). From this page, you can download a JSON file with the performance results. + +Running the `tox` tests also runs the performance tests if any are available. Benchmarking tests are done with `pytest-benchmark` and they output a table with results to the console.
+ +To write benchmarks, simply use the [pytest benchmark fixture](https://pytest-benchmark.readthedocs.io/en/latest/usage.html#usage) like the following: + +```python +def test_simple_start_span(benchmark): + def benchmark_start_as_current_span(span_name, attribute_num): + span = tracer.start_span( + span_name, + attributes={"count": attribute_num}, + ) + span.end() + + benchmark(benchmark_start_as_current_span, "benchmarkedSpan", 42) +``` + +Make sure the test file is under the `tests/performance/benchmarks/` folder of +the package it is benchmarking and further has a path that corresponds to the +file in the package it is testing. Make sure that the file name begins with +`test_benchmark_`. (e.g. `opentelemetry-sdk/tests/performance/benchmarks/trace/propagation/test_benchmark_b3_format.py`) + ## Pull Requests ### How to Send Pull Requests diff --git a/opentelemetry-sdk/tests/performance/benchmarks/trace/test_benchmark_trace.py b/opentelemetry-sdk/tests/performance/benchmarks/trace/test_benchmark_trace.py new file mode 100644 index 00000000000..a407a341f45 --- /dev/null +++ b/opentelemetry-sdk/tests/performance/benchmarks/trace/test_benchmark_trace.py @@ -0,0 +1,51 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import opentelemetry.sdk.trace as trace +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import sampling + +tracer = trace.TracerProvider( + sampler=sampling.DEFAULT_ON, + resource=Resource( + { + "service.name": "A123456789", + "service.version": "1.34567890", + "service.instance.id": "123ab456-a123-12ab-12ab-12340a1abc12", + } + ), +).get_tracer("sdk_tracer_provider") + + +def test_simple_start_span(benchmark): + def benchmark_start_as_current_span(): + span = tracer.start_span( + "benchmarkedSpan", + attributes={"long.attribute": -10000000001000000000}, + ) + span.add_event("benchmarkEvent") + span.end() + + benchmark(benchmark_start_as_current_span) + + +def test_simple_start_as_current_span(benchmark): + def benchmark_start_as_current_span(): + with tracer.start_as_current_span( + "benchmarkedSpan", + attributes={"long.attribute": -10000000001000000000}, + ) as span: + span.add_event("benchmarkEvent") + + benchmark(benchmark_start_as_current_span) diff --git a/tox.ini b/tox.ini index 4dd1bc6b804..c5044993b81 100644 --- a/tox.ini +++ b/tox.ini @@ -49,8 +49,8 @@ envlist = pypy3-test-core-opentracing-shim lint - py39-tracecontext - py39-{mypy,mypyinstalled} + tracecontext + mypy,mypyinstalled docs docker-tests @@ -58,6 +58,7 @@ envlist = deps = -c dev-requirements.txt test: pytest + test: pytest-benchmark coverage: pytest coverage: pytest-cov mypy,mypyinstalled: mypy @@ -164,7 +165,7 @@ changedir = docs commands = sphinx-build -E -a -W -b html -T . _build/html -[testenv:py39-tracecontext] +[testenv:tracecontext] basepython: python3.9 deps = # needed for tracecontext