Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
  • Loading branch information
ocelotl committed Nov 7, 2022
1 parent a1271e2 commit 6604d85
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 33 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,6 @@ def _translate_data(
)

else:
[data_point for data_point in metric.data.data_points]
_logger.warning(
"unsupported data type %s",
metric.data.__class__.__name__,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -364,13 +364,15 @@ def collect(

# pylint: disable=protected-access
class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]):
# min_max_size is the smallest reasonable configuration, which is small
# enough to contain the entire normal floating point range at min
# scale.
# _min_max_size and _max_max_size are the smallest and largest values
# the max_size parameter may have, respectively.

# _min_max_size is the smallest reasonable value which is small enough
# to contain the entire normal floating point range at the minimum scale.
_min_max_size = 2

# max_max_size is an arbitrary limit meant to limit accidental use of
# giant histograms.
# _max_max_size is an arbitrary limit meant to limit accidental creation of
# giant exponential bucket histograms.
_max_max_size = 16384

def __init__(
Expand All @@ -384,9 +386,8 @@ def __init__(
max_size: int = 160,
):
super().__init__(attributes)
# maxSize is the maximum capacity of the positive and negative ranges.
# it is set by Init(), preserved by Copy and Move.)

# max_size is the maximum capacity of the positive and negative
# buckets.
if max_size < self._min_max_size:
raise Exception("size {max_size} is smaller than {min_max_size}")

Expand All @@ -395,32 +396,32 @@ def __init__(

self._max_size = max_size

# _sum is the sum of all calls to aggregate reflected in the
# aggregator.
# _sum is the sum of all the values aggregated by this aggregator.
self._sum = 0

# count is incremented by 1 per call to aggregate.
# _count is the count of all calls to aggregate.
self._count = 0

# zero_count is incremented by 1 when the measured value is exactly 0.
# _zero_count is the count of all the calls to aggregate when the value
# to be aggregated is exactly 0.
self._zero_count = 0

# _min is set when count > 0
self._min = 0
# _min is the smallest value aggregated by this aggregator.
self._min = inf

# _max is set when count > 0
self._max = 0
# _max is the largest value aggregated by this aggregator.
self._max = -inf

# _positive holds the positive values
# _positive holds the positive values.
self._positive = Buckets()

# _negative holds the negative values by their absolute value
# _negative holds the negative values by their absolute value.
self._negative = Buckets()

# _mapping corresponds to the current scale, is shared by both positive
# and negative ranges.

# _mapping corresponds to the current scale, is shared by both the
# positive and negative buckets.
self._mapping = LogarithmMapping(LogarithmMapping._max_scale)

self._instrument_temporality = AggregationTemporality.DELTA
self._start_time_unix_nano = start_time_unix_nano

Expand Down Expand Up @@ -763,7 +764,7 @@ def _grow(self, buckets: Buckets, needed: int):
new_size = self._max_size

new_positive_limit = new_size - bias
buckets._backing.grow_to(
buckets._backing.grow(
new_size, old_positive_limit, new_positive_limit
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,15 @@
from abc import ABC, abstractmethod


class BucketsBacking(ABC):
class Backing(ABC):
@abstractmethod
def size(self) -> int:
"""
Returns the physical size of the backing array, which is
>= buckets.Len() the number allocated.
Returns the physical size of the backing array.
"""

@abstractmethod
def grow_to(
def grow(
self, new_size: int, old_positive_limit: int, new_positive_limit: int
) -> None:
"""
Expand Down Expand Up @@ -63,7 +62,7 @@ def reset(self) -> None:
"""


class BucketsVarWidth(BucketsBacking):
class VariableWidthBacking(Backing):
def __init__(self):

self._counts = [0]
Expand All @@ -75,16 +74,13 @@ def size(self) -> int:
"""
return len(self._counts)

def grow_to(
def grow(
self, new_size: int, old_positive_limit: int, new_positive_limit: int
) -> None:
"""
Grows the backing array into a new size and copies old entries into
their correct new positions.
"""
# FIXME this follows Go implementation maybe too closely. Since we
# don't need to request memory for a larger list, maybe this can be
# implemented in a more pythonical way.
tmp = [0] * new_size
tmp[new_positive_limit:] = self._counts[old_positive_limit:]
tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
Expand Down Expand Up @@ -132,7 +128,7 @@ def reset(self) -> None:

class Buckets:
def __init__(self):
self._backing = BucketsVarWidth()
self._backing = VariableWidthBacking()

# The term "index" refers to the number of the
# histogram bucket used to determine its boundaries.
Expand Down

0 comments on commit 6604d85

Please sign in to comment.