Skip to content

Commit

Permalink
Adjust exp backoff according to max request time
Browse files Browse the repository at this point in the history
For each block range, we make multiple requests with different filters.
Before this commit, only the last request's duration was used to
adjust the batch size. But the point of the adjustment is to prevent any
request from running into a timeout, so adjusting based on the maximal
duration of all requests is a better way to achieve this.
  • Loading branch information
karlb committed Nov 5, 2020
1 parent cb39ef8 commit 6cd56d3
Showing 1 changed file with 6 additions and 5 deletions.
11 changes: 6 additions & 5 deletions raiden/blockchain/events.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,7 @@ def fetch_logs_in_batch(self, target_block_number: BlockNumber) -> Optional[Poll
# go through lots of elements).

try:
decoded_result, request_duration = self._query_and_track(from_block, to_block)
decoded_result, max_request_duration = self._query_and_track(from_block, to_block)
except EthGetLogsTimeout:
# The request timed out - this typically means the node wasn't able to process
# the requested batch size fast enough.
Expand All @@ -416,15 +416,15 @@ def fetch_logs_in_batch(self, target_block_number: BlockNumber) -> Optional[Poll
# Adjust block batch size depending on request duration.
# To reduce oscillating the batch size is kept constant for request durations
# between ``ETH_GET_LOGS_THRESHOLD_FAST`` and ``ETH_GET_LOGS_THRESHOLD_SLOW``.
if request_duration < ETH_GET_LOGS_THRESHOLD_FAST:
if max_request_duration < ETH_GET_LOGS_THRESHOLD_FAST:
# The request was fast, increase batch size
if can_use_bigger_batches:
# But only if we actually need bigger batches. This prevents the batch
# size from ballooning towards the maximum after the initial sync is done
# since then typically only one block is fetched at a time which is usually
# fast.
self.block_batch_size_adjuster.increase()
elif request_duration > ETH_GET_LOGS_THRESHOLD_SLOW:
elif max_request_duration > ETH_GET_LOGS_THRESHOLD_SLOW:
# The request is taking longer than the 'slow' threshold - decrease
# the batch size
self.block_batch_size_adjuster.decrease()
Expand Down Expand Up @@ -473,7 +473,7 @@ def _query_and_track(
*all* filters will start from 9, thus missing the event for the new
channel on block 8.
"""
request_duration: float = 0
max_request_duration: float = 0
result: List[DecodedEvent] = []
event_filter: Optional[RaidenContractFilter] = self.event_filter

Expand Down Expand Up @@ -513,6 +513,7 @@ def _query_and_track(
RPCEndpoint("eth_getLogs"), [filter_params]
)
request_duration = time.monotonic() - start
max_request_duration = max(max_request_duration, request_duration)
except ReadTimeout as ex:
# The request timed out while waiting for a response (as opposed to a
# ConnectTimeout).
Expand Down Expand Up @@ -567,7 +568,7 @@ def _query_and_track(
else:
event_filter = None

return result, request_duration
return result, max_request_duration

def uninstall_all_event_listeners(self) -> None:
with self._filters_lock:
Expand Down

0 comments on commit 6cd56d3

Please sign in to comment.