diff --git a/.changes/1.12.6.json b/.changes/1.12.6.json new file mode 100644 index 0000000000..e081ee08b9 --- /dev/null +++ b/.changes/1.12.6.json @@ -0,0 +1,17 @@ +[ + { + "category": "Serialization", + "description": "Fixes `#1557 `__. Fixed a regression in serialization where request bodies would be improperly encoded.", + "type": "bugfix" + }, + { + "category": "``es``", + "description": "Update es client to latest version", + "type": "api-change" + }, + { + "category": "``rekognition``", + "description": "Update rekognition client to latest version", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d69c3c2475..609f80117a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,14 @@ CHANGELOG ========= +1.12.6 +====== + +* bugfix:Serialization: Fixes `#1557 `__. Fixed a regression in serialization where request bodies would be improperly encoded. +* api-change:``es``: Update es client to latest version +* api-change:``rekognition``: Update rekognition client to latest version + + 1.12.5 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index 3dc57e672b..56f41074c9 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.12.5' +__version__ = '1.12.6' class NullHandler(logging.Handler): diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py index d245bf9bef..e4969db3dc 100644 --- a/botocore/awsrequest.py +++ b/botocore/awsrequest.py @@ -464,6 +464,15 @@ def prepare_headers(self, headers): headers = headers or {} self.headers = HeadersDict(headers.items()) + def _to_utf8(self, item): + key, value = item + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(value, six.text_type): + value = value.encode('utf-8') + + return key, value + def prepare_body(self, data): """Prepares the given HTTP body data.""" self.body = data @@ -478,7 +487,7 @@ def prepare_body(self, data): self.headers['Content-Length'] = '0' if isinstance(self.body, dict): - params = list(self.body.items()) + params = [self._to_utf8(item) for item in self.body.items()] self.body = urlencode(params, doseq=True) try: diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index 920507a95a..e14e7a8fd3 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -545,6 +545,10 @@ "shape":"EncryptionAtRestOptions", "documentation":"
Specifies the Encryption At Rest Options.
" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"
Specifies the NodeToNodeEncryptionOptions.
" + }, "AdvancedOptions":{ "shape":"AdvancedOptions", "documentation":"
Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.
" @@ -978,6 +982,10 @@ "shape":"EncryptionAtRestOptionsStatus", "documentation":"
Specifies the EncryptionAtRestOptions for the Elasticsearch domain.
" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptionsStatus", + "documentation":"
Specifies the NodeToNodeEncryptionOptions for the Elasticsearch domain.
" + }, "AdvancedOptions":{ "shape":"AdvancedOptionsStatus", "documentation":"
Specifies the AdvancedOptions for the domain. See Configuring Advanced Options for more information.
" @@ -1063,6 +1071,10 @@ "shape":"EncryptionAtRestOptions", "documentation":"
Specifies the status of the EncryptionAtRestOptions.
" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"
Specifies the status of the NodeToNodeEncryptionOptions.
" + }, "AdvancedOptions":{ "shape":"AdvancedOptions", "documentation":"
Specifies the status of the AdvancedOptions.
" @@ -1486,6 +1498,34 @@ "type":"string", "documentation":"
Paginated APIs accept a NextToken input to return the next page of results and provide a NextToken output in the response, which the client can use to retrieve more results.
" }, + "NodeToNodeEncryptionOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"
Specify true to enable node-to-node encryption.
" + } + }, + "documentation":"
Specifies the node-to-node encryption options.
" + }, + "NodeToNodeEncryptionOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"
Specifies the node-to-node encryption options for the specified Elasticsearch domain.
" + }, + "Status":{ + "shape":"OptionStatus", + "documentation":"
Specifies the status of the node-to-node encryption options for the specified Elasticsearch domain.
" + } + }, + "documentation":"
Status of the node-to-node encryption options for the specified Elasticsearch domain.
" + }, "OptionState":{ "type":"string", "documentation":"
The state of a requested change. One of the following:
", diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index d516b881d3..581f56c2a6 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -391,7 +391,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"
Detects faces in the input image and adds them to the specified collection.
Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.
To get the number of faces in a collection, call .
If you are using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, call and supply the collection ID. You also get the model version from the value of FaceModelVersion in the response from IndexFaces.
For more information, see Model Versioning in the Amazon Rekognition Developer Guide.
If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.
In response, the operation returns an array of metadata for all detected faces. This includes, the bounding box of the detected face, confidence value (indicating the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the detectionAttributes parameter, Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mouth) and other facial attributes such gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.
For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.
The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
This operation requires permissions to perform the rekognition:IndexFaces action.
" + "documentation":"
Detects faces in the input image and adds them to the specified collection.
Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.
To get the number of faces in a collection, call .
If you are using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, call and supply the collection ID. You also get the model version from the value of FaceModelVersion in the response from IndexFaces.
For more information, see Model Versioning in the Amazon Rekognition Developer Guide.
If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.
You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image, and you don't want to index other faces detected in the image.
The QualityFilter input parameter allows you to filter out detected faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases.
In response, the operation returns an array of metadata for all detected faces, FaceRecords. This includes:
If you request all facial attributes (using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mouth) and other facial attributes such as gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.
Information about faces detected in an image, but not indexed, is returned in an array of objects, UnindexedFaces. Faces are not indexed for reasons such as:
For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.
The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
This operation requires permissions to perform the rekognition:IndexFaces action.
" }, "ListCollections":{ "name":"ListCollections", @@ -1980,6 +1980,14 @@ "DetectionAttributes":{ "shape":"Attributes", "documentation":"
An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify [\"DEFAULT\"], the API returns the following subset of facial attributes: BoundingBox, Confidence, Pose, Quality and Landmarks. If you provide [\"ALL\"], all facial attributes are returned but the operation will take longer to complete.
If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).
" + }, + "MaxFaces":{ + "shape":"MaxFacesToIndex", + "documentation":"
The maximum number of faces to index. The value of MaxFaces must be greater than or equal to 1. IndexFaces returns no more than 100 detected faces in an image, even if you specify a larger value for MaxFaces.
If IndexFaces detects more faces than the value of MaxFaces, the faces with the lowest quality are filtered out first. If there are still more faces than the value of MaxFaces, the faces with the smallest bounding boxes are filtered out (up to the number needed to satisfy the value of MaxFaces). Information about the unindexed faces is available in the UnindexedFaces array.
The faces returned by IndexFaces are sorted by face bounding box size, in descending order from the largest to the smallest.
" + }, + "QualityFilter":{ + "shape":"QualityFilter", + "documentation":"
Specifies how much filtering is done to identify faces detected with low quality. Filtered faces are not indexed. If you specify AUTO, filtering prioritizes the identification of faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. Low quality detections can arise for a number of reasons. For example, an object misidentified as a face, a face that is too blurry, or a face with a pose that is too extreme to use. If you specify NONE, no filtering is performed. The default value is NONE.
" } } }, @@ -1997,6 +2005,10 @@ "FaceModelVersion":{ "shape":"String", "documentation":"
Version number of the face detection model associated with the input collection (CollectionId).
" + }, + "UnindexedFaces":{ + "shape":"UnindexedFaces", + "documentation":"
An array of faces that were detected in the image but not indexed, either because the quality filter deemed them to be of low quality or because the MaxFaces request parameter filtered them out. To use the quality filter, you specify the QualityFilter request parameter.
" } } }, @@ -2274,6 +2286,10 @@ "max":4096, "min":1 }, + "MaxFacesToIndex":{ + "type":"integer", + "min":1 + }, "MaxResults":{ "type":"integer", "min":1 @@ -2478,6 +2494,28 @@ "documentation":"
The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.
", "exception":true }, + "QualityFilter":{ + "type":"string", + "enum":[ + "NONE", + "AUTO" + ] + }, + "Reason":{ + "type":"string", + "enum":[ + "EXCEEDS_MAX_FACES", + "EXTREME_POSE", + "LOW_BRIGHTNESS", + "LOW_SHARPNESS", + "LOW_CONFIDENCE", + "SMALL_BOUNDING_BOX" + ] + }, + "Reasons":{ + "type":"list", + "member":{"shape":"Reason"} + }, "RecognizeCelebritiesRequest":{ "type":"structure", "required":["Image"], @@ -3055,6 +3093,24 @@ "type":"long", "min":0 }, + "UnindexedFace":{ + "type":"structure", + "members":{ + "Reasons":{ + "shape":"Reasons", + "documentation":"
An array of reasons specifying why a face was not indexed.
" + }, + "FaceDetail":{ + "shape":"FaceDetail", + "documentation":"
Structure containing attributes of a face that was detected, but not indexed, by IndexFaces.
" + } + }, + "documentation":"
A face detected by but not indexed. Use the Reasons response attribute to determine why a face is not indexed.
" + }, + "UnindexedFaces":{ + "type":"list", + "member":{"shape":"UnindexedFace"} + }, "Url":{"type":"string"}, "Urls":{ "type":"list", diff --git a/docs/source/conf.py b/docs/source/conf.py index 73a3b9a58d..c3e0d3d4f2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.12' # The full version, including alpha/beta/rc tags. -release = '1.12.5' +release = '1.12.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py index 6c69967192..903ce4b11a 100644 --- a/tests/unit/test_awsrequest.py +++ b/tests/unit/test_awsrequest.py @@ -128,6 +128,20 @@ def test_can_prepare_dict_body(self): prepared_request = request.prepare() self.assertEqual(prepared_request.body, 'dead=beef') + def test_can_prepare_dict_body_unicode_values(self): + body = {'Text': u'\u30c6\u30b9\u30c8 string'} + expected_body = 'Text=%E3%83%86%E3%82%B9%E3%83%88+string' + request = AWSRequest(url='http://example.com/', data=body) + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, expected_body) + + def test_can_prepare_dict_body_unicode_keys(self): + body = {u'\u30c6\u30b9\u30c8': 'string'} + expected_body = '%E3%83%86%E3%82%B9%E3%83%88=string' + request = AWSRequest(url='http://example.com/', data=body) + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, expected_body) + def test_can_prepare_empty_body(self): request = AWSRequest(url='http://example.com/', data=b'') prepared_request = request.prepare()