-
-
Notifications
You must be signed in to change notification settings - Fork 2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fixes some bugs in the limit connection feature #2964
Changes from 6 commits
caea886
58495e3
96acca9
199b2bd
b94c684
ad92b5e
9e54abf
38011f4
ab2a5f6
69bf66a
9e958ff
ff39bb8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
Fixes some bugs in the limit connection feature |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,6 @@ | ||
import asyncio | ||
import functools | ||
import random | ||
import sys | ||
import traceback | ||
import warnings | ||
|
@@ -359,9 +360,14 @@ def closed(self): | |
""" | ||
return self._closed | ||
|
||
async def connect(self, req, traces=None): | ||
"""Get from pool or create new connection.""" | ||
key = req.connection_key | ||
def _available_connections(self, key): | ||
""" | ||
Return number of available connections taking into account | ||
the limit, limit_per_host and the connection key. | ||
|
||
If it returns less than 1, it means that there are no | ||
connections available. | ||
""" | ||
|
||
if self._limit: | ||
# total calc available connections | ||
|
@@ -380,6 +386,13 @@ async def connect(self, req, traces=None): | |
else: | ||
available = 1 | ||
|
||
return available | ||
|
||
async def connect(self, req, traces=None): | ||
"""Get from pool or create new connection.""" | ||
key = req.connection_key | ||
available = self._available_connections(key) | ||
|
||
# Wait if there are no available connections. | ||
if available <= 0: | ||
fut = self._loop.create_future() | ||
|
@@ -394,7 +407,7 @@ async def connect(self, req, traces=None): | |
|
||
try: | ||
await fut | ||
except BaseException: | ||
except BaseException as e: | ||
# remove a waiter even if it was cancelled, normally it's | ||
# removed when it's notified | ||
try: | ||
|
@@ -405,7 +418,7 @@ async def connect(self, req, traces=None): | |
if not waiters: | ||
del self._waiters[key] | ||
|
||
raise | ||
raise e | ||
|
||
if traces: | ||
for trace in traces: | ||
|
@@ -430,12 +443,12 @@ async def connect(self, req, traces=None): | |
proto.close() | ||
raise ClientConnectionError("Connector is closed.") | ||
except BaseException: | ||
# signal to waiter | ||
if key in self._waiters: | ||
waiters = self._waiters[key] | ||
self._release_key_waiter(key, waiters) | ||
if not self._closed: | ||
self._acquired.remove(placeholder) | ||
self._drop_acquired_per_host(key, placeholder) | ||
self._release_waiter() | ||
raise | ||
finally: | ||
else: | ||
if not self._closed: | ||
self._acquired.remove(placeholder) | ||
self._drop_acquired_per_host(key, placeholder) | ||
|
@@ -477,35 +490,37 @@ def _get(self, key): | |
del self._conns[key] | ||
return None | ||
|
||
def _release_key_waiter(self, key, waiters): | ||
if not waiters: | ||
return False | ||
def _release_waiter(self): | ||
""" | ||
Iterates over all waiters till it finds one that is not finished and | ||
belongs to a host that has available connections. | ||
""" | ||
if not self._waiters: | ||
return | ||
|
||
waiter = waiters.popleft() | ||
if not waiter.done(): | ||
waiter.set_result(None) | ||
# Having the dict keys ordered this avoids to iterate | ||
# at the same order at each call. | ||
queues = list(self._waiters.keys()) | ||
random.shuffle(queues) | ||
|
||
if not waiters: | ||
del self._waiters[key] | ||
for key in queues: | ||
if self._available_connections(key) < 1: | ||
continue | ||
|
||
return True | ||
waiters = self._waiters[key] | ||
|
||
def _release_waiter(self): | ||
# always release only one waiter | ||
while waiters: | ||
waiter = waiters.popleft() | ||
if not waiter.done(): | ||
waiter.set_result(None) | ||
|
||
if self._limit: | ||
# if we have limit and we have available | ||
if self._limit - len(self._acquired) > 0: | ||
for key, waiters in self._waiters.items(): | ||
if self._release_key_waiter(key, waiters): | ||
break | ||
|
||
elif self._limit_per_host: | ||
# if we have dont have limit but have limit per host | ||
# then release first available | ||
for key, waiters in self._waiters.items(): | ||
if self._release_key_waiter(key, waiters): | ||
break | ||
if not waiters: | ||
del self._waiters[key] | ||
|
||
return | ||
|
||
if not waiters: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The condition is always true. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I mean it would be nice to have a test case where waiters become non-empty after releasing. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes it doesn't make sense right now let me get rid of this part of the code and I will check the tests. |
||
del self._waiters[key] | ||
|
||
def _release_acquired(self, key, proto): | ||
if self._closed: | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why do we need shuffling?
The operation is not free.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, that hurts me. But without the shuffle the iteration of the queues will be done always in the same order, having chances of starvation for certain queues.
We could try to get rid of that operation, but it will imply some important changes.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The idea of @thehesiod of having a "true" FIFO would get rid of this operation, but my gut feeling says that this FIFO queue will have a large enough footprint - it has to take care of the limit per host - to make it slower than just a call to `shuffle`.
P.S.: a shuffle operation takes around 1 microsecond for a list of 10 elements.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
`waiters` is a `deque`. `deque.rotate()` is cheap. Can we use it?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Remember that `_waiters` is the dictionary that holds the queues. So, unfortunately, we can't use it.
We might implement the FIFO on top of a single `deque`, using the `rotate` method as a way to skip a waiter that can't be deallocated because of the limit-per-host restriction. I didn't know this, but access to the head element is O(1), so the check and the later skip can be done without losing the deque properties.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Got it. The PR's code is ok to me.