Add micro benchmark tests for metric instrument operations (#3267)
Co-authored-by: Srikanth Chekuri <[email protected]>
lzchen and srikanthccv authored Aug 17, 2023
1 parent fccd958 commit 8e81bbf
Showing 3 changed files with 205 additions and 0 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -51,6 +51,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#3223](https://github.com/open-telemetry/opentelemetry-python/pull/3223))
- Add specced out environment variables and arguments for BatchLogRecordProcessor
([#3237](https://github.com/open-telemetry/opentelemetry-python/pull/3237))
- Add benchmark tests for metrics
([#3267](https://github.com/open-telemetry/opentelemetry-python/pull/3267))


## Version 1.17.0/0.38b0 (2023-03-22)
@@ -0,0 +1,77 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest

from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
InMemoryMetricReader,
)

reader_cumulative = InMemoryMetricReader()
reader_delta = InMemoryMetricReader(
preferred_temporality={
Counter: AggregationTemporality.DELTA,
},
)
provider_reader_cumulative = MeterProvider(
metric_readers=[reader_cumulative],
)
provider_reader_delta = MeterProvider(metric_readers=[reader_delta])
meter_cumulative = provider_reader_cumulative.get_meter("sdk_meter_provider")
meter_delta = provider_reader_delta.get_meter("sdk_meter_provider_delta")
counter_cumulative = meter_cumulative.create_counter("test_counter")
counter_delta = meter_delta.create_counter("test_counter2")
udcounter = meter_cumulative.create_up_down_counter("test_udcounter")
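# The two pipelines above differ only in the Counter's aggregation
# temporality: the delta reader reports the change since the previous
# collection, while the cumulative reader reports the running total
# since the start of the process.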


@pytest.mark.parametrize(
("num_labels", "temporality"),
[
(0, "delta"),
(1, "delta"),
(3, "delta"),
(5, "delta"),
(10, "delta"),
(0, "cumulative"),
(1, "cumulative"),
(3, "cumulative"),
(5, "cumulative"),
(10, "cumulative"),
],
)
def test_counter_add(benchmark, num_labels, temporality):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

def benchmark_counter_add():
if temporality == "cumulative":
counter_cumulative.add(1, labels)
else:
counter_delta.add(1, labels)

benchmark(benchmark_counter_add)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 10])
def test_up_down_counter_add(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

def benchmark_up_down_counter_add():
udcounter.add(1, labels)

benchmark(benchmark_up_down_counter_add)
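
A minimal sketch, not part of the commit, of how the in-memory readers defined above could be used to inspect what a benchmark run recorded; it assumes the module-level objects from this file:

counter_cumulative.add(1, {"Key0": "Value0"})
metrics_data = reader_cumulative.get_metrics_data()
for resource_metrics in metrics_data.resource_metrics:
    for scope_metrics in resource_metrics.scope_metrics:
        for metric in scope_metrics.metrics:
            # Each data point carries the attribute set it was recorded
            # under, so distinct label sets produce distinct points.
            for point in metric.data.data_points:
                print(metric.name, dict(point.attributes), point.value)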
@@ -0,0 +1,126 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random

import pytest

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import (
ExplicitBucketHistogramAggregation,
View,
)

MAX_BOUND_VALUE = 10000


def _generate_bounds(bound_count):
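    # Evenly spaced boundaries: i * MAX_BOUND_VALUE / bound_count for
    # i in [0, bound_count), i.e. from 0 up to (but not including)
    # MAX_BOUND_VALUE.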
bounds = []
for i in range(bound_count):
bounds.append(i * MAX_BOUND_VALUE / bound_count)
return bounds


hist_view_10 = View(
instrument_name="test_histogram_10_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)),
)
hist_view_49 = View(
instrument_name="test_histogram_49_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)),
)
hist_view_50 = View(
instrument_name="test_histogram_50_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)),
)
hist_view_1000 = View(
instrument_name="test_histogram_1000_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)),
)
reader = InMemoryMetricReader()
provider = MeterProvider(
metric_readers=[reader],
views=[
hist_view_10,
hist_view_49,
hist_view_50,
hist_view_1000,
],
)
meter = provider.get_meter("sdk_meter_provider")
hist = meter.create_histogram("test_histogram_default")
hist10 = meter.create_histogram("test_histogram_10_bound")
hist49 = meter.create_histogram("test_histogram_49_bound")
hist50 = meter.create_histogram("test_histogram_50_bound")
hist1000 = meter.create_histogram("test_histogram_1000_bound")
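# Each named histogram matches exactly one View above by instrument_name,
# so the corresponding explicit-bucket aggregation is applied to it; the
# "default" histogram keeps the SDK's default bucket boundaries.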


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

    def benchmark_histogram_record():
        hist.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_10(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

    def benchmark_histogram_record_10():
        hist10.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_10)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_49(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

    def benchmark_histogram_record_49():
        hist49.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_49)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_50(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

    def benchmark_histogram_record_50():
        hist50.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_50)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_1000(benchmark, num_labels):
    labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)}

    def benchmark_histogram_record_1000():
        hist1000.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_1000)
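
A minimal sketch, not part of the commit, showing how the reader defined above could confirm which bucket boundaries each view applied; it assumes the module-level objects from this file:

hist10.record(123.4)
hist1000.record(123.4)
for resource_metrics in reader.get_metrics_data().resource_metrics:
    for scope_metrics in resource_metrics.scope_metrics:
        for metric in scope_metrics.metrics:
            for point in metric.data.data_points:
                # explicit_bounds reflects the View's bucket list;
                # bucket_counts has one extra slot for the overflow bucket.
                print(metric.name, len(point.explicit_bounds))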
