diff --git a/CHANGELOG.md b/CHANGELOG.md
index 765cd847c7b..f6c2c8deba7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,6 +53,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ([#3223](https://github.com/open-telemetry/opentelemetry-python/pull/3223))
 - Add speced out environment variables and arguments for BatchLogRecordProcessor
   ([#3237](https://github.com/open-telemetry/opentelemetry-python/pull/3237))
+- Add benchmark tests for metrics
+  ([#3267](https://github.com/open-telemetry/opentelemetry-python/pull/3267))
 
 ## Version 1.17.0/0.38b0 (2023-03-22)
 
diff --git a/opentelemetry-api/pyproject.toml b/opentelemetry-api/pyproject.toml
index f659d777576..d03e870dfbf 100644
--- a/opentelemetry-api/pyproject.toml
+++ b/opentelemetry-api/pyproject.toml
@@ -28,7 +28,7 @@ dependencies = [
   "Deprecated >= 1.2.6",
   # FIXME This should be able to be removed after 3.12 is released if there is a reliable API
   # in importlib.metadata.
-  "importlib-metadata ~= 6.0",
+  "importlib-metadata >= 6.0",
 ]
 dynamic = [
   "version",
diff --git a/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics.py b/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics.py
new file mode 100644
index 00000000000..81fb0b6e1d8
--- /dev/null
+++ b/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics.py
@@ -0,0 +1,77 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
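+
+# Micro-benchmarks for the synchronous Counter and UpDownCounter instruments.
+# Two MeterProvider / InMemoryMetricReader pairs are configured below so that
+# Counter.add() can be measured under both cumulative and delta aggregation
+# temporality; timing is driven by the pytest-benchmark `benchmark` fixture.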
+import pytest
+
+from opentelemetry.sdk.metrics import Counter, MeterProvider
+from opentelemetry.sdk.metrics.export import (
+    AggregationTemporality,
+    InMemoryMetricReader,
+)
+
+reader_cumulative = InMemoryMetricReader()
+reader_delta = InMemoryMetricReader(
+    preferred_temporality={
+        Counter: AggregationTemporality.DELTA,
+    },
+)
+provider_reader_cumulative = MeterProvider(
+    metric_readers=[reader_cumulative],
+)
+provider_reader_delta = MeterProvider(metric_readers=[reader_delta])
+meter_cumulative = provider_reader_cumulative.get_meter("sdk_meter_provider")
+meter_delta = provider_reader_delta.get_meter("sdk_meter_provider_delta")
+counter_cumulative = meter_cumulative.create_counter("test_counter")
+counter_delta = meter_delta.create_counter("test_counter2")
+udcounter = meter_cumulative.create_up_down_counter("test_udcounter")
+
+
+@pytest.mark.parametrize(
+    ("num_labels", "temporality"),
+    [
+        (0, "delta"),
+        (1, "delta"),
+        (3, "delta"),
+        (5, "delta"),
+        (10, "delta"),
+        (0, "cumulative"),
+        (1, "cumulative"),
+        (3, "cumulative"),
+        (5, "cumulative"),
+        (10, "cumulative"),
+    ],
+)
+def test_counter_add(benchmark, num_labels, temporality):
+    # Build the attribute set once; a loop that rebuilds the same dict
+    # num_labels times is redundant.
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_counter_add():
+        if temporality == "cumulative":
+            counter_cumulative.add(1, labels)
+        else:
+            counter_delta.add(1, labels)
+
+    benchmark(benchmark_counter_add)
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 10])
+def test_up_down_counter_add(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_up_down_counter_add():
+        udcounter.add(1, labels)
+
+    benchmark(benchmark_up_down_counter_add)
diff --git a/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics_histogram,.py b/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics_histogram,.py
new file mode 100644
index 00000000000..2f9c4405418
--- /dev/null
+++ b/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics_histogram,.py
@@ -0,0 +1,126 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
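+
+# Micro-benchmarks for Histogram.record() using explicit-bucket histogram
+# aggregations with 10, 49, 50, and 1000 bucket boundaries (plus the default
+# aggregation), driven by the pytest-benchmark `benchmark` fixture.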
+import random
+
+import pytest
+
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import InMemoryMetricReader
+from opentelemetry.sdk.metrics.view import (
+    ExplicitBucketHistogramAggregation,
+    View,
+)
+
+MAX_BOUND_VALUE = 10000
+
+
+def _generate_bounds(bound_count):
+    bounds = []
+    for i in range(bound_count):
+        bounds.append(i * MAX_BOUND_VALUE / bound_count)
+    return bounds
+
+
+hist_view_10 = View(
+    instrument_name="test_histogram_10_bound",
+    aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)),
+)
+hist_view_49 = View(
+    instrument_name="test_histogram_49_bound",
+    aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)),
+)
+hist_view_50 = View(
+    instrument_name="test_histogram_50_bound",
+    aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)),
+)
+hist_view_1000 = View(
+    instrument_name="test_histogram_1000_bound",
+    aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)),
+)
+reader = InMemoryMetricReader()
+provider = MeterProvider(
+    metric_readers=[reader],
+    views=[
+        hist_view_10,
+        hist_view_49,
+        hist_view_50,
+        hist_view_1000,
+    ],
+)
+meter = provider.get_meter("sdk_meter_provider")
+hist = meter.create_histogram("test_histogram_default")
+hist10 = meter.create_histogram("test_histogram_10_bound")
+hist49 = meter.create_histogram("test_histogram_49_bound")
+hist50 = meter.create_histogram("test_histogram_50_bound")
+hist1000 = meter.create_histogram("test_histogram_1000_bound")
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
+def test_histogram_record(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_histogram_record():
+        # Pass the generated labels so the num_labels parametrization actually
+        # affects the measurement instead of being unused.
+        hist.record(random.random() * MAX_BOUND_VALUE, labels)
+
+    benchmark(benchmark_histogram_record)
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
+def test_histogram_record_10(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_histogram_record_10():
+        hist10.record(random.random() * MAX_BOUND_VALUE, labels)
+
+    benchmark(benchmark_histogram_record_10)
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
+def test_histogram_record_49(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_histogram_record_49():
+        hist49.record(random.random() * MAX_BOUND_VALUE, labels)
+
+    benchmark(benchmark_histogram_record_49)
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
+def test_histogram_record_50(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_histogram_record_50():
+        hist50.record(random.random() * MAX_BOUND_VALUE, labels)
+
+    benchmark(benchmark_histogram_record_50)
+
+
+@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
+def test_histogram_record_1000(benchmark, num_labels):
+    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}
+
+    def benchmark_histogram_record_1000():
+        hist1000.record(random.random() * MAX_BOUND_VALUE, labels)
+
+    benchmark(benchmark_histogram_record_1000)