Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add ability to set max retry attempts #1260

Merged
merged 6 commits into from
Aug 11, 2017
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions botocore/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,7 @@ def compute_client_args(self, service_model, client_config,
read_timeout=client_config.read_timeout,
max_pool_connections=client_config.max_pool_connections,
proxies=client_config.proxies,
retries=client_config.retries
)
s3_config = self.compute_s3_config(scoped_config,
client_config)
Expand Down
16 changes: 9 additions & 7 deletions botocore/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ def create_client(self, service_name, region_name, is_secure=True,
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
service_client = cls(**client_args)
self._register_retries(service_client)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Might be worth double checking that moving when the retries gets registered is fine. It is now getting registered to the client's event emitter after the client is created instead of on any service model load and to the client creator's event emitter. I made the change because it is technically more correct (in terms of what event emitter the handler should be registered to), the code flow was better (we are using the computed client config instead of what is provided directly to the create_client() call), and it should not affect any of the other registered handlers.

self._register_s3_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
Expand All @@ -95,11 +96,10 @@ def _load_service_model(self, service_name, api_version=None):
json_model = self._loader.load_service_model(service_name, 'service-2',
api_version=api_version)
service_model = ServiceModel(json_model, service_name=service_name)
self._register_retries(service_model)
return service_model

def _register_retries(self, service_model):
endpoint_prefix = service_model.endpoint_prefix
def _register_retries(self, client):
endpoint_prefix = client.meta.service_model.endpoint_prefix

# First, we load the entire retry config for all services,
# then pull out just the information we need.
Expand All @@ -109,15 +109,17 @@ def _register_retries(self, service_model):

retry_config = self._retry_config_translator.build_retry_config(
endpoint_prefix, original_config.get('retry', {}),
original_config.get('definitions', {}))
original_config.get('definitions', {}),
client.meta.config.retries
)

logger.debug("Registering retry handlers for service: %s",
service_model.service_name)
client.meta.service_model.service_name)
handler = self._retry_handler_factory.create_retry_handler(
retry_config, endpoint_prefix)
unique_id = 'retry-config-%s' % endpoint_prefix
self._event_emitter.register('needs-retry.%s' % endpoint_prefix,
handler, unique_id=unique_id)
client.meta.events.register('needs-retry.%s' % endpoint_prefix,
handler, unique_id=unique_id)

def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
client_config, scoped_config):
Expand Down
24 changes: 23 additions & 1 deletion botocore/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.exceptions import InvalidRetryConfiguration


class Config(object):
Expand Down Expand Up @@ -88,6 +89,18 @@ class Config(object):

* path -- Addressing style is always by path. Endpoints will be
addressed as such: s3.amazonaws.com/mybucket

:type retries: dict
:param retries: A dictionary for retry specific configurations.
Valid keys are:

* 'max_attempts' -- An integer representing the maximum number of
retry attempts that will be made on a single request. For
example, setting this value to 2 will result in the request
being retried at most two times after the initial request. Setting
this value to 0 will result in no retries ever being attempted after
the initial request. If not provided, the number of retries will
default to whatever is modeled, which is typically four retries.
"""
OPTION_DEFAULTS = OrderedDict([
('region_name', None),
Expand All @@ -99,7 +112,8 @@ class Config(object):
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('s3', None)
('s3', None),
('retries', None)
])

def __init__(self, *args, **kwargs):
Expand All @@ -117,6 +131,8 @@ def __init__(self, *args, **kwargs):
# Validate the s3 options
self._validate_s3_configuration(self.s3)

self._validate_retry_configuration(self.retries)

def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
Expand Down Expand Up @@ -157,6 +173,12 @@ def _validate_s3_configuration(self, s3):
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)

def _validate_retry_configuration(self, retries):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should probably also validate somewhere that max_attempts is >= 0.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sounds good. I can add that.

if retries is not None:
for key in retries:
if key not in ['max_attempts']:
raise InvalidRetryConfiguration(retry_config_option=key)

def merge(self, other_config):
"""Merges the config object with another config object

Expand Down
7 changes: 7 additions & 0 deletions botocore/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,13 @@ class InvalidS3AddressingStyleError(BotoCoreError):
)


class InvalidRetryConfiguration(BotoCoreError):
    """Error when an invalid retry configuration key is specified.

    Raised when the ``retries`` dictionary given to ``Config`` contains
    a key other than 'max_attempts'.
    """
    fmt = (
        'Cannot provide retry configuration for "{retry_config_option}". '
        'Valid retry configuration options are: \'max_attempts\''
    )

class StubResponseError(BotoCoreError):
fmt = 'Error getting response stub for operation {operation_name}: {reason}'

Expand Down
28 changes: 26 additions & 2 deletions botocore/translate.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,22 +11,46 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy

from botocore.utils import merge_dicts


def build_retry_config(endpoint_prefix, retry_model, definitions):
def build_retry_config(endpoint_prefix, retry_model, definitions,
client_retry_config=None):
service_config = retry_model.get(endpoint_prefix, {})
resolve_references(service_config, definitions)
# We want to merge the global defaults with the service specific
# defaults, with the service specific defaults taking precedence.
# So we use the global defaults as the base.
final_retry_config = {'__default__': retry_model.get('__default__', {})}
#
# A deepcopy is done on the retry defaults because it ensures the
# retry model has no chance of getting mutated when the service specific
# configuration or client retry config is merged in.
final_retry_config = {
'__default__': copy.deepcopy(retry_model.get('__default__', {}))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So was this deepcopy always a bug then that we just never ran into in practice until now?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah there was a bug. The only time it would be executed though is if you created a dynamodb client and then created other types of clients after as the other clients would inherit the 10 max_attempts of dynamodb. I added the test_service_specific_defaults_do_not_clobber to make sure that does not happen again.

}
resolve_references(final_retry_config, definitions)
# Then merge the service specific config on top.
merge_dicts(final_retry_config, service_config)
if client_retry_config is not None:
_merge_client_retry_config(final_retry_config, client_retry_config)
return final_retry_config


def _merge_client_retry_config(retry_config, client_retry_config):
max_retry_attempts_override = client_retry_config.get('max_attempts')
if max_retry_attempts_override is not None:
# In the retry config, the max_attempts refers to the maximum number
# of requests in general will be made. However, for the client's
# retry config it refers to how many retry attempts will be made at
# most. So to translate this number from the client config, one is
added to convert it to the maximum number of requests that will be made
# by including the initial request.
retry_config['__default__'][
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We don't support this now, but if we ever support max_attempts at the operation level, we'll need to revisit how we do this. Might be worth adding a note about that.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I can do that.

'max_attempts'] = max_retry_attempts_override + 1


def resolve_references(config, definitions):
"""Recursively replace $ref keys.

Expand Down
97 changes: 97 additions & 0 deletions tests/functional/test_retry.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import BaseSessionTest, mock

from botocore.exceptions import ClientError
from botocore.config import Config


class TestRetry(BaseSessionTest):
def setUp(self):
    """Prepare a session and patch out ``time.sleep`` so retry backoff
    delays do not slow the tests down."""
    super(TestRetry, self).setUp()
    self.region = 'us-west-2'
    patcher = mock.patch('time.sleep')
    self.sleep_patch = patcher
    patcher.start()

def tearDown(self):
    """Stop the ``time.sleep`` patch and run base-class cleanup."""
    self.sleep_patch.stop()
    # Bug fix: the original override never chained to the parent class,
    # silently skipping whatever cleanup BaseSessionTest.tearDown performs.
    super(TestRetry, self).tearDown()

def add_n_retryable_responses(self, mock_send, num_responses):
    """Queue ``num_responses`` retryable HTTP 500 responses on the
    mocked ``send`` so every call made through it will fail."""
    def _retryable_response():
        response = mock.Mock()
        response.status_code = 500
        response.headers = {}
        response.content = b'{}'
        return response

    mock_send.side_effect = [
        _retryable_response() for _ in range(num_responses)
    ]

def assert_will_retry_n_times(self, method, num_retries):
    """Assert that calling ``method`` performs exactly ``num_retries``
    retries (i.e. ``num_retries + 1`` total requests) before raising."""
    total_requests = num_retries + 1
    with mock.patch('botocore.endpoint.Session.send') as mock_send:
        self.add_n_retryable_responses(mock_send, total_requests)
        expected_message = 'reached max retries: %s' % num_retries
        with self.assertRaisesRegexp(ClientError, expected_message):
            method()
        self.assertEqual(mock_send.call_count, total_requests)

def test_can_override_max_attempts(self):
    # A client-level retry config should cap retries below the
    # service's modeled default.
    retry_config = Config(retries={'max_attempts': 1})
    client = self.session.create_client(
        'dynamodb', self.region, config=retry_config)
    self.assert_will_retry_n_times(client.list_tables, 1)

def test_do_not_attempt_retries(self):
    # A max_attempts of zero disables retries entirely.
    no_retry_config = Config(retries={'max_attempts': 0})
    client = self.session.create_client(
        'dynamodb', self.region, config=no_retry_config)
    self.assert_will_retry_n_times(client.list_tables, 0)

def test_setting_max_attempts_does_not_set_for_other_clients(self):
    # Create one client with an explicit max-attempts override.
    self.session.create_client(
        'codecommit', self.region,
        config=Config(retries={'max_attempts': 1}))

    # A second client created without a retry config must fall back to
    # the modeled default for this service (four retries), not inherit
    # the first client's override.
    default_client = self.session.create_client('codecommit', self.region)
    self.assert_will_retry_n_times(default_client.list_repositories, 4)

def test_service_specific_defaults_do_not_clobber(self):
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you explain what this is actually testing? It looks like it's just verifying we're able to load service specific config. I don't really get the clobbering part.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is to test the case:

c = session.create_client('dynamodb')
c2 = session.create_client('ec2')

The second client for ec2 would actually have the max retries of dynamodb, even though it is configured for 5 max_attempts.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I also see what I can do to make the name clearer.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah or just a comment or something to explain it.

# Make a dynamodb client. It's a special case client that is
# configured to a make a maximum of 10 requests (9 retries).
client = self.session.create_client('dynamodb', self.region)
self.assert_will_retry_n_times(client.list_tables, 9)

# A codecommit client is not a special case for retries. It will at
# most make 5 requests (4 retries) for its default.
client = self.session.create_client('codecommit', self.region)
self.assert_will_retry_n_times(client.list_repositories, 4)

def test_set_max_attempts_on_session(self):
    # A retry config set on the session's default client config should
    # be inherited by clients created from that session.
    session_config = Config(retries={'max_attempts': 1})
    self.session.set_default_client_config(session_config)
    client = self.session.create_client('codecommit', self.region)
    self.assert_will_retry_n_times(client.list_repositories, 1)

def test_can_clobber_max_attempts_on_session(self):
    # A per-client retry config takes precedence over the session-level
    # default client config.
    self.session.set_default_client_config(
        Config(retries={'max_attempts': 1}))
    overriding_config = Config(retries={'max_attempts': 0})
    client = self.session.create_client(
        'codecommit', self.region, config=overriding_config)
    self.assert_will_retry_n_times(client.list_repositories, 0)
21 changes: 21 additions & 0 deletions tests/unit/test_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,3 +203,24 @@ def test_region_does_not_resolve_if_not_s3_and_endpoint_url_provided(self):
service_model, 'us-west-2', True, 'http://other.com/', True, None,
{}, config, bridge)
self.assertEqual(client_args['client_config'].region_name, None)

def test_provide_retry_config(self):
self.args_create = args.ClientArgsCreator(
mock.Mock(), None, None, None, None)
service_model = mock.Mock()
service_model.endpoint_prefix = 'ec2'
service_model.metadata = {'protocol': 'query'}
config = botocore.config.Config(
retries={'max_attempts': 10}
)
bridge = mock.Mock()
bridge.resolve.side_effect = [{
'region_name': None, 'signature_version': 'v4',
'endpoint_url': 'http://other.com/', 'signing_name': 'ec2',
'signing_region': None, 'metadata': {}
}]
client_args = self.args_create.get_client_args(
service_model, 'us-west-2', True, 'https://ec2/', True, None,
{}, config, bridge)
self.assertEqual(
client_args['client_config'].retries, {'max_attempts': 10})
31 changes: 31 additions & 0 deletions tests/unit/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from botocore.exceptions import ParamValidationError
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.exceptions import UnknownSignatureVersionError
from botocore.exceptions import InvalidRetryConfiguration
from botocore.errorfactory import ClientExceptionsFactory
from botocore.stub import Stubber
from botocore import exceptions
Expand Down Expand Up @@ -819,6 +820,26 @@ def test_service_retry_missing_config(self):
for call in event_emitter.register.call_args_list:
self.assertNotIn('needs-retry', call[0][0])

def test_can_override_max_attempts(self):
retry_handler_factory = mock.Mock(botocore.retryhandler)
creator = self.create_client_creator(
retry_handler_factory=retry_handler_factory)
creator.create_client(
'myservice', 'us-west-2',
client_config=botocore.config.Config(retries={'max_attempts': 9}))

retry_handler_factory.create_retry_handler.assert_called_with({
'__default__': {
'delay': {
'growth_factor': 2,
'base': 'rand',
'type': 'exponential'
},
'policies': {},
'max_attempts': 10
}
}, 'myservice')

def test_try_to_paginate_non_paginated(self):
self.loader.load_service_model.side_effect = [
self.service_description,
Expand Down Expand Up @@ -1602,6 +1623,16 @@ def test_merge_overrides_only_when_user_provided_values(self):
self.assertEqual(new_config.region_name, 'us-west-2')
self.assertEqual(new_config.signature_version, 's3v4')

def test_can_set_retry_max_attempts(self):
config = botocore.config.Config(retries={'max_attempts': 15})
self.assertEqual(config.retries['max_attempts'], 15)

def test_validates_retry_config(self):
with self.assertRaisesRegexp(
InvalidRetryConfiguration,
'Cannot provide retry configuration for "not-allowed"'):
botocore.config.Config(retries={'not-allowed': True})


class TestClientEndpointBridge(unittest.TestCase):
def setUp(self):
Expand Down
40 changes: 40 additions & 0 deletions tests/unit/test_translate.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,3 +75,43 @@ def test_resolve_reference(self):
# And we should resolve references.
self.assertEqual(operation_config['policies']['other'],
{"from": {"definition": "file"}})

def test_service_specific_defaults_no_mutate_default_retry(self):
    """Building one service's retry config must not mutate the shared
    upstream retry model."""
    retry = translate.build_retry_config('sts', self.retry['retry'],
                                         self.retry['definitions'])
    # sts has a service-specific policy that overrides parts of the
    # global defaults.
    expected_sts_default = {
        "max_attempts": 5,
        "delay": "service_specific_delay",
        "policies": {
            "global_one": "global",
            "override_me": "service",
            "service_one": "service",
        }
    }
    self.assertEqual(retry['__default__'], expected_sts_default)

    # The upstream model's general defaults must be left untouched by
    # the merge above.
    expected_global_default = {
        "max_attempts": 5,
        "delay": "global_delay",
        "policies": {
            "global_one": "global",
            "override_me": "global",
        }
    }
    self.assertEqual(self.retry['retry']['__default__'],
                     expected_global_default)

def test_client_override_max_attempts(self):
    client_retry_config = {'max_attempts': 9}
    retry = translate.build_retry_config(
        'sts', self.retry['retry'], self.retry['definitions'],
        client_retry_config=client_retry_config
    )
    # The built config counts the initial request too, so a client
    # override of 9 retries translates to 10 total attempts.
    self.assertEqual(retry['__default__']['max_attempts'], 10)
    # The original retry model must not be mutated by the override.
    self.assertEqual(
        self.retry['retry']['__default__']['max_attempts'], 5)