Add ability to set max retry attempts #1260
In the client Config class:

@@ -15,6 +15,7 @@
 from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
 from botocore.exceptions import InvalidS3AddressingStyleError
+from botocore.exceptions import InvalidRetryConfiguration


 class Config(object):
@@ -88,6 +89,18 @@ class Config(object):
           * path -- Addressing style is always by path. Endpoints will be
             addressed as such: s3.amazonaws.com/mybucket
+
+    :type retries: dict
+    :param retries: A dictionary for retry specific configurations.
+        Valid keys are:
+
+        * 'max_attempts' -- An integer representing the maximum number of
+          retry attempts that will be made on a single request. For
+          example, setting this value to 2 will result in the request
+          being retried at most two times after the initial request. Setting
+          this value to 0 will result in no retries ever being attempted on
+          the initial request. If not provided, the number of retries will
+          default to whatever is modeled, which is typically four retries.
     """
     OPTION_DEFAULTS = OrderedDict([
         ('region_name', None),
@@ -99,7 +112,8 @@ class Config(object):
         ('parameter_validation', True),
         ('max_pool_connections', MAX_POOL_CONNECTIONS),
         ('proxies', None),
-        ('s3', None)
+        ('s3', None),
+        ('retries', None)
     ])

     def __init__(self, *args, **kwargs):
@@ -117,6 +131,8 @@ def __init__(self, *args, **kwargs):
         # Validate the s3 options
         self._validate_s3_configuration(self.s3)

+        self._validate_retry_configuration(self.retries)
+
     def _record_user_provided_options(self, args, kwargs):
         option_order = list(self.OPTION_DEFAULTS)
         user_provided_options = {}
@@ -157,6 +173,12 @@ def _validate_s3_configuration(self, s3):
                 raise InvalidS3AddressingStyleError(
                     s3_addressing_style=addressing_style)

+    def _validate_retry_configuration(self, retries):
+        if retries is not None:
+            for key in retries:
+                if key not in ['max_attempts']:
+                    raise InvalidRetryConfiguration(retry_config_option=key)
+
     def merge(self, other_config):
         """Merges the config object with another config object

Review comments on _validate_retry_configuration:

[Reviewer] Should probably also validate somewhere that …

[Author] Sounds good. I can add that.
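For reference, the retries option added in this file is passed like any other Config option. The following is a minimal usage sketch based on the diff above; the service, region, and the misspelled key are arbitrary examples, not part of this change:

import botocore.session
from botocore.config import Config
from botocore.exceptions import InvalidRetryConfiguration

session = botocore.session.get_session()

# At most 2 retries after the initial request, i.e. up to 3 requests total.
client = session.create_client(
    'dynamodb', region_name='us-west-2',
    config=Config(retries={'max_attempts': 2}))

# Any key other than 'max_attempts' is rejected when the Config object is
# constructed, via _validate_retry_configuration above.
try:
    Config(retries={'max_atempts': 2})  # typo in the key
except InvalidRetryConfiguration as error:
    print(error)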
In the retry config translation module:

@@ -11,22 +11,46 @@
 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
+import copy
+
 from botocore.utils import merge_dicts


-def build_retry_config(endpoint_prefix, retry_model, definitions):
+def build_retry_config(endpoint_prefix, retry_model, definitions,
+                       client_retry_config=None):
     service_config = retry_model.get(endpoint_prefix, {})
     resolve_references(service_config, definitions)
     # We want to merge the global defaults with the service specific
     # defaults, with the service specific defaults taking precedence.
     # So we use the global defaults as the base.
-    final_retry_config = {'__default__': retry_model.get('__default__', {})}
+    #
+    # A deepcopy is done on the retry defaults because it ensures the
+    # retry model has no chance of getting mutated when the service specific
+    # configuration or client retry config is merged in.
+    final_retry_config = {
+        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
+    }
     resolve_references(final_retry_config, definitions)
     # The merge the service specific config on top.
     merge_dicts(final_retry_config, service_config)
+    if client_retry_config is not None:
+        _merge_client_retry_config(final_retry_config, client_retry_config)
     return final_retry_config


+def _merge_client_retry_config(retry_config, client_retry_config):
+    max_retry_attempts_override = client_retry_config.get('max_attempts')
+    if max_retry_attempts_override is not None:
+        # In the retry config, max_attempts refers to the maximum number
+        # of requests that will be made in general. However, for the client's
+        # retry config it refers to how many retry attempts will be made at
+        # most. So to translate this number from the client config, one is
+        # added to convert it to the maximum number of requests that will be
+        # made by including the initial request.
+        retry_config['__default__'][
+            'max_attempts'] = max_retry_attempts_override + 1
+
+
 def resolve_references(config, definitions):
     """Recursively replace $ref keys.

Review comments on the deepcopy of the retry defaults:

[Reviewer] So was this deepcopy always a bug then that we just never ran into in practice until now?

[Author] Yeah, there was a bug. The only time it would be executed, though, is if you created a dynamodb client and then created other types of clients afterwards, as the other clients would inherit the 10 max_attempts of dynamodb. I added the …

Review comments on the max_attempts override in _merge_client_retry_config:

[Reviewer] We don't support this now, but if we ever support …

[Author] I can do that.
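To make the deepcopy and the off-by-one translation concrete, here is a small illustrative sketch of build_retry_config with a client retry config. It assumes the function lives in botocore.translate (where the rest of botocore imports it from), and the retry_model dict below is a made-up stand-in for the loaded retry JSON model, not real data:

from botocore.translate import build_retry_config

# Made-up retry model: a global default of 5 total attempts (4 retries)
# and no service specific overrides.
retry_model = {'__default__': {'max_attempts': 5}}

final_config = build_retry_config(
    'codecommit', retry_model, definitions={},
    client_retry_config={'max_attempts': 1})

# The client asked for 1 retry, which the merge translates into 2 total
# attempts: the initial request plus one retry.
assert final_config['__default__']['max_attempts'] == 2

# Because the default is deepcopied, the shared retry model is not mutated,
# so clients created later still see the original default.
assert retry_model['__default__']['max_attempts'] == 5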
A new functional test file for retries:

@@ -0,0 +1,97 @@
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import BaseSessionTest, mock

from botocore.exceptions import ClientError
from botocore.config import Config


class TestRetry(BaseSessionTest):
    def setUp(self):
        super(TestRetry, self).setUp()
        self.region = 'us-west-2'
        self.sleep_patch = mock.patch('time.sleep')
        self.sleep_patch.start()

    def tearDown(self):
        self.sleep_patch.stop()

    def add_n_retryable_responses(self, mock_send, num_responses):
        responses = []
        for _ in range(num_responses):
            http_response = mock.Mock()
            http_response.status_code = 500
            http_response.headers = {}
            http_response.content = b'{}'
            responses.append(http_response)
        mock_send.side_effect = responses

    def assert_will_retry_n_times(self, method, num_retries):
        num_responses = num_retries + 1
        with mock.patch('botocore.endpoint.Session.send') as mock_send:
            self.add_n_retryable_responses(mock_send, num_responses)
            with self.assertRaisesRegexp(
                    ClientError, 'reached max retries: %s' % num_retries):
                method()
            self.assertEqual(mock_send.call_count, num_responses)

    def test_can_override_max_attempts(self):
        client = self.session.create_client(
            'dynamodb', self.region, config=Config(
                retries={'max_attempts': 1}))
        self.assert_will_retry_n_times(client.list_tables, 1)

    def test_do_not_attempt_retries(self):
        client = self.session.create_client(
            'dynamodb', self.region, config=Config(
                retries={'max_attempts': 0}))
        self.assert_will_retry_n_times(client.list_tables, 0)

    def test_setting_max_attempts_does_not_set_for_other_clients(self):
        # Make one client with max attempts configured.
        self.session.create_client(
            'codecommit', self.region, config=Config(
                retries={'max_attempts': 1}))

        # Make another client that has no custom retry configured.
        client = self.session.create_client('codecommit', self.region)
        # It should use the default max retries, which should be four retries
        # for this service.
        self.assert_will_retry_n_times(client.list_repositories, 4)
    def test_service_specific_defaults_do_not_clobber(self):
        # Make a dynamodb client. It's a special case client that is
        # configured to make a maximum of 10 requests (9 retries).
        client = self.session.create_client('dynamodb', self.region)
        self.assert_will_retry_n_times(client.list_tables, 9)

        # A codecommit client is not a special case for retries. It will at
        # most make 5 requests (4 retries) by default.
        client = self.session.create_client('codecommit', self.region)
        self.assert_will_retry_n_times(client.list_repositories, 4)

    def test_set_max_attempts_on_session(self):
        self.session.set_default_client_config(
            Config(retries={'max_attempts': 1}))
        # Max attempts should be inherited from the session.
        client = self.session.create_client('codecommit', self.region)
        self.assert_will_retry_n_times(client.list_repositories, 1)

    def test_can_clobber_max_attempts_on_session(self):
        self.session.set_default_client_config(
            Config(retries={'max_attempts': 1}))
        # Max attempts should override the session's configured max attempts.
        client = self.session.create_client(
            'codecommit', self.region, config=Config(
                retries={'max_attempts': 0}))
        self.assert_will_retry_n_times(client.list_repositories, 0)

Review comments on test_service_specific_defaults_do_not_clobber:

[Reviewer] Can you explain what this is actually testing? It looks like it's just verifying we're able to load service specific config. I don't really get the clobbering part.

[Author] This is to test the case:

    c = session.create_client('dynamodb')
    c2 = session.create_client('ec2')

The second client, for ec2, would actually have the max retries of dynamodb, even though it is configured for 5 max_attempts.

[Author] I'll also see what I can do to make the name clearer.

[Reviewer] Yeah, or just a comment or something to explain it.
General comment on the change:

[Author] Might be worth double checking that moving when the retry handler gets registered is fine. It is now getting registered to the client's event emitter after the client is created, instead of on any service model load and to the client creator's event emitter. I made the change because it is technically more correct (in terms of which event emitter the handler should be registered to), the code flow is better (we are using the computed client config instead of what is provided directly to the create_client() call), and it should not affect any of the other registered handlers.
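For context on that last point: the difference is between a handler shared through the session (and therefore picked up by every client it creates) and one scoped to a single client's own event emitter after the client exists. The sketch below illustrates only that distinction; the no-op handler and the use of the 'needs-retry.dynamodb' event name are illustrative assumptions, not the actual handler this change registers:

import botocore.session

def on_needs_retry(**kwargs):
    # Placeholder handler; the real retry handler decides whether the
    # response is retryable and how long to sleep before retrying.
    print('needs-retry event fired')

session = botocore.session.get_session()

# Registered on the session: every client created afterwards inherits it
# through the client creator's event emitter.
session.register('needs-retry.dynamodb', on_needs_retry)
shared_client = session.create_client('dynamodb', region_name='us-west-2')

# Registered on one client's meta.events after creation, which is the kind
# of per-client registration described above: only this client sees it.
scoped_client = session.create_client('dynamodb', region_name='us-west-2')
scoped_client.meta.events.register('needs-retry.dynamodb', on_needs_retry)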