From e75fc29236fae56c78fa741c854f304b02272cff Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Wed, 8 Jun 2022 09:00:22 +0200 Subject: [PATCH] Rename aws_s3 to s3_object (and deprecate bucket creation/deletion) (#869) Rename aws_s3 to s3_object (and deprecate bucket creation/deletion) SUMMARY The aws_s3 module (as it's known today) is primarily for managing objects within S3. While it provides minimal support for creating S3 buckets, the feature set is very limited. Support for the advanced bucket management features (such as managing encryption settings) is provided via the s3_bucket module. Because the name aws_s3 often puts the module at the top of the list of modules, well away from the s3_bucket module, it can be difficult for folks to discover the s3_bucket module, leading them to assume that we simply have no support for the more complex bucket management features. As such, I suggest renaming the module to s3_object to make the intended scope more obvious and to improve the discoverability of s3_bucket. At this time I do not recommend setting a deprecation date for the alias; the cost of an alias is minimal, and we've had a lot of churn recently. Additionally, this change deprecates the duplicated (but very limited) bucket creation/deletion functionality of aws_s3/s3_object. ISSUE TYPE Feature Pull Request COMPONENT NAME aws_s3 (s3_object) ADDITIONAL INFORMATION See, for example, #866, where there was an attempt to create duplicate functionality. Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell Reviewed-by: Jill R --- changelogs/fragments/869-s3_object.yml | 7 + meta/runtime.yml | 7 +- plugins/action/{aws_s3.py => s3_object.py} | 8 +- plugins/modules/{aws_s3.py => s3_object.py} | 166 +++++++++++------- .../targets/{aws_s3 => s3_object}/aliases | 2 + .../{aws_s3 => s3_object}/defaults/main.yml | 0 .../{aws_s3 => s3_object}/files/hello.txt | 0 .../{aws_s3 => s3_object}/files/test.png | Bin .../{aws_s3 => s3_object}/meta/main.yml | 0 .../tasks/copy_object.yml | 18 +- .../tasks/delete_bucket.yml | 8 +- .../{aws_s3 => s3_object}/tasks/main.yml | 157 +++++++++-------- .../templates/policy.json.j2 | 0 .../templates/put-template.txt.j2 | 0 14 files changed, 225 insertions(+), 148 deletions(-) create mode 100644 changelogs/fragments/869-s3_object.yml rename plugins/action/{aws_s3.py => s3_object.py} (90%) rename plugins/modules/{aws_s3.py => s3_object.py} (89%) rename tests/integration/targets/{aws_s3 => s3_object}/aliases (55%) rename tests/integration/targets/{aws_s3 => s3_object}/defaults/main.yml (100%) rename tests/integration/targets/{aws_s3 => s3_object}/files/hello.txt (100%) rename tests/integration/targets/{aws_s3 => s3_object}/files/test.png (100%) rename tests/integration/targets/{aws_s3 => s3_object}/meta/main.yml (100%) rename tests/integration/targets/{aws_s3 => s3_object}/tasks/copy_object.yml (96%) rename tests/integration/targets/{aws_s3 => s3_object}/tasks/delete_bucket.yml (87%) rename tests/integration/targets/{aws_s3 => s3_object}/tasks/main.yml (93%) rename tests/integration/targets/{aws_s3 => s3_object}/templates/policy.json.j2 (100%) rename tests/integration/targets/{aws_s3 => s3_object}/templates/put-template.txt.j2 (100%) diff --git a/changelogs/fragments/869-s3_object.yml b/changelogs/fragments/869-s3_object.yml new file mode 100644 index 00000000000..6b62a9d647b --- /dev/null +++ b/changelogs/fragments/869-s3_object.yml @@ -0,0 +1,7 @@ +minor_changes: +- aws_s3 - The ``aws_s3`` module has been renamed to ``s3_object``
(https://github.com/ansible-collections/amazon.aws/pull/869). +deprecated_features: +- s3_object - Support for creation and deletion of S3 buckets has been deprecated. Please use the + ``amazon.aws.s3_bucket`` module to create and delete buckets + (https://github.com/ansible-collections/amazon.aws/pull/869). diff --git a/meta/runtime.yml b/meta/runtime.yml index 74a8b36ffc9..4844c1eceaa 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -4,7 +4,6 @@ action_groups: - aws_az_info - aws_caller_info - aws_s3 - - aws_s3 - aws_secret - cloudformation - cloudformation_info @@ -48,7 +47,11 @@ action_groups: - iam - rds - s3_bucket + - s3_object plugin_routing: + action: + aws_s3: + redirect: amazon.aws.s3_object modules: aws_az_facts: deprecation: @@ -56,6 +59,8 @@ plugin_routing: warning_text: >- aws_az_facts was renamed in Ansible 2.9 to aws_az_info. Please update your tasks. + aws_s3: + redirect: amazon.aws.s3_object ec2: deprecation: removal_version: 4.0.0 diff --git a/plugins/action/aws_s3.py b/plugins/action/s3_object.py similarity index 90% rename from plugins/action/aws_s3.py rename to plugins/action/s3_object.py index a454922a101..91e98793f44 100644 --- a/plugins/action/aws_s3.py +++ b/plugins/action/s3_object.py @@ -31,7 +31,11 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True def run(self, tmp=None, task_vars=None): - ''' handler for aws_s3 operations ''' + ''' handler for s3_object operations + + This adds the magic that means 'src' can point to both a 'remote' file + on the 'host' or in the 'files/' lookup path on the controller. + ''' self._supports_async = True if task_vars is None: @@ -59,7 +63,7 @@ def run(self, tmp=None, task_vars=None): raise AnsibleActionFail(to_text(e)) wrap_async = self._task.async_val and not self._connection.has_native_async - # execute the aws_s3 module with the updated args + # execute the s3_object module with the updated args result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async)) if not wrap_async: diff --git a/plugins/modules/aws_s3.py b/plugins/modules/s3_object.py similarity index 89% rename from plugins/modules/aws_s3.py rename to plugins/modules/s3_object.py index 565c9d9d33f..535cd07c548 100644 --- a/plugins/modules/aws_s3.py +++ b/plugins/modules/s3_object.py @@ -8,13 +8,16 @@ DOCUMENTATION = ''' --- -module: aws_s3 +module: s3_object version_added: 1.0.0 -short_description: manage objects in S3. +short_description: Manage objects in S3 description: - - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and - deleting both objects and buckets, retrieving objects as files or strings, generating download links and - copy of an object that is already stored in Amazon S3. + - This module allows the user to manage the objects and directories within S3 buckets. Includes + support for creating and deleting objects and directories, retrieving objects as files or + strings, generating download links and copying objects that are already stored in Amazon S3. + - Support for creating or deleting S3 buckets with this module has been deprecated and will be + removed in release 6.0.0. + - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module. options: bucket: description: @@ -23,16 +26,18 @@ type: str dest: description: - - The destination file path when downloading an object/key with a C(GET) operation. + - The destination file path when downloading an object/key when I(mode=get). 
+ - Ignored when I(mode) is not C(get). type: path encrypt: description: - - When set for PUT/COPY mode, asks for server-side encryption. + - Asks for server-side encryption of the objects when I(mode=put) or I(mode=copy). + - Ignored when I(mode) is neither C(put) nor C(copy). default: true type: bool encryption_mode: description: - - What encryption mode to use if I(encrypt=true). + - The encryption mode to use if I(encrypt=true). default: AES256 choices: - AES256 @@ -40,53 +45,66 @@ type: str expiry: description: - - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation. + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a + I(mode=put) or I(mode=geturl) operation. + - Ignored when I(mode) is neither C(put) nor C(geturl). default: 600 aliases: ['expiration'] type: int headers: description: - - Custom headers for C(PUT) operation, as a dictionary of C(key=value) and C(key=value,key=value). + - Custom headers to use when I(mode=put) as a dictionary of key value pairs. + - Ignored when I(mode) is not C(put). type: dict marker: description: - - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + - Specifies the key to start with when using list mode. Object keys are returned in + alphabetical order, starting with key after the marker in order. type: str max_keys: description: - - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + - Max number of results to return when I(mode=list), set this if you want to retrieve fewer + than the default 1000 keys. + - Ignored when I(mode) is not C(list). default: 1000 type: int metadata: description: - - Metadata for PUT/COPY operation, as a dictionary of C(key=value) and C(key=value,key=value). + - Metadata to use when I(mode=put) or I(mode=copy) as a dictionary of key value pairs. type: dict mode: description: - Switches the module behaviour between - - 'C(PUT): upload' - - 'C(GET): download' + - 'C(put): upload' + - 'C(get): download' - 'C(geturl): return download URL' - 'C(getstr): download object as string' - 'C(list): list keys' - - 'C(create): create bucket' - - 'C(delete): delete bucket' + - 'C(create): create bucket directories' + - 'C(delete): delete bucket directories' - 'C(delobj): delete object' - 'C(copy): copy object that is already stored in another bucket' + - Support for creating and deleting buckets has been deprecated and will + be removed in release 6.0.0. To create and manage the bucket itself + please use the M(amazon.aws.s3_bucket) module. required: true choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'] type: str object: description: - - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + - Keyname of the object inside the bucket. + - Can be used to create "virtual directories", see examples. type: str permission: description: - This option lets the user set the canned permissions on the object/bucket that are created. - The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or - C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), - C(bucket-owner-full-control) for an object. 
Multiple permissions can be specified as a list; although only the first one - will be used during the initial upload of the file + The permissions that can be set are C(private), C(public-read), C(public-read-write), + C(authenticated-read) for a bucket or C(private), C(public-read), C(public-read-write), + C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), C(bucket-owner-full-control) + for an object. Multiple permissions can be specified as a list; although only the first one + will be used during the initial upload of the file. + - For a full list of permissions see the AWS documentation + U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). default: ['private'] type: list elements: str @@ -97,18 +115,23 @@ type: str version: description: - - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. + - Version ID of the object inside the bucket. Can be used to get a specific version of a file + if versioning is enabled in the target bucket. type: str overwrite: description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with C(PUT) and C(GET) operations. + - Force overwrite either locally on the filesystem or remotely with the object/key. + - Used when I(mode=put) or I(mode=get). + - Ignored when I(mode) is neither C(put) nor C(get). - Must be a Boolean, C(always), C(never), C(different) or C(latest). - C(true) is the same as C(always). - C(false) is equal to C(never). - - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag' of the object/key in S3. - The ETag may or may not be an MD5 digest of the object data. See the ETag response header here + - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag' + of the object/key in S3. The ETag may or may not be an MD5 digest of the object data. See + the ETag response header here + U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html). - - (C(GET) mode only) When this is set to C(latest) the last modified timestamp of local file is compared with the 'LastModified' of the object/key in S3. + - When I(mode=get) and I(overwrite=latest) the last modified timestamp of the local file + is compared with the 'LastModified' of the object/key in S3. default: 'always' aliases: ['force'] type: str @@ -137,39 +160,45 @@ type: bool src: description: - - The source file path when performing a C(PUT) operation. - - Either I(content), I(content_base64) or I(src) must be specified for a C(PUT) operation. Ignored otherwise. + - The source file path when performing a C(put) operation. + - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put), + otherwise ignored. type: path content: description: - - The content to C(PUT) into an object. - - The parameter value will be treated as a string and converted to UTF-8 before sending it to S3. - To send binary data, use the I(content_base64) parameter instead. - - The content to C(put) into an object. + - The parameter value will be treated as a string and converted to UTF-8 before sending it to + S3. + - To send binary data, use the I(content_base64) parameter instead. + - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put), + otherwise ignored.
version_added: "1.3.0" type: str content_base64: description: - - The base64-encoded binary data to C(PUT) into an object. + - The base64-encoded binary data to C(put) into an object. - Use this if you need to put raw binary data, and don't forget to encode in base64. - - Either I(content), I(content_base64) or I(src) must be specified for a C(PUT) operation. Ignored otherwise. + - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put) + otherwise ignored. version_added: "1.3.0" type: str ignore_nonexistent_bucket: description: - - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the - C(GetObject) permission but no other permissions. In this case using the option mode: get will fail without specifying - I(ignore_nonexistent_bucket=true)." + - Overrides initial bucket lookups in case bucket or IAM policies are restrictive. + - This can be useful when a user may have the C(GetObject) permission but no other + permissions. In which case using I(mode=get) will fail unless + I(ignore_nonexistent_bucket=true) is specified. type: bool default: false encryption_kms_key_id: description: - - KMS key id to use when encrypting objects using I(encrypting=aws:kms). Ignored if I(encryption) is not C(aws:kms). + - KMS key id to use when encrypting objects using I(encrypting=aws:kms). + - Ignored if I(encryption) is not C(aws:kms). type: str copy_src: description: - The source details of the object to copy. - - Required if I(mode) is C(copy). + - Required if I(mode=copy). type: dict version_added: 2.0.0 suboptions: @@ -191,39 +220,40 @@ description: - Whether the bucket name should be validated to conform to AWS S3 naming rules. - On by default, this may be disabled for S3 backends that do not enforce these rules. - - See https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + - See the Amazon documentation for more information about bucket naming rules + U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). type: bool version_added: 3.1.0 default: True author: - - "Lester Wade (@lwade)" - - "Sloane Hertel (@s-hertel)" - - "Alina Buzachis (@linabuzachis)" + - "Lester Wade (@lwade)" + - "Sloane Hertel (@s-hertel)" + - "Alina Buzachis (@linabuzachis)" notes: -- Support for I(tags) and I(purge_tags) was added in release 2.0.0. + - Support for I(tags) and I(purge_tags) was added in release 2.0.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags ''' EXAMPLES = ''' - name: Simple PUT operation - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt src: /usr/local/myfile.txt mode: put - name: PUT operation from a rendered template - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /object.yaml content: "{{ lookup('template', 'templates/object.yaml.j2') }}" mode: put - name: Simple PUT operation in Ceph RGW S3 - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt src: /usr/local/myfile.txt @@ -232,14 +262,14 @@ s3_url: "http://localhost:8000" - name: Simple GET operation - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt dest: /usr/local/myfile.txt mode: get - name: Get a specific version of an object. 
- amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt version: 48c9ee5131af7a716edc22df9772aa6f @@ -247,7 +277,7 @@ mode: get - name: PUT/upload with metadata - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt src: /usr/local/myfile.txt @@ -255,7 +285,7 @@ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' - name: PUT/upload with custom headers - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt src: /usr/local/myfile.txt @@ -263,12 +293,12 @@ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' - name: List keys simple - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket mode: list - name: List keys all options - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket mode: list prefix: /my/desired/ @@ -276,25 +306,25 @@ max_keys: 472 - name: Create an empty bucket - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket mode: create permission: public-read - name: Create a bucket with key as directory, in the EU region - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/directory/path mode: create region: eu-west-1 - name: Delete a bucket and all contents - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket mode: delete - name: GET an object but don't download if the file checksums match. New in 2.0 - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt dest: /usr/local/myfile.txt @@ -302,13 +332,13 @@ overwrite: different - name: Delete an object from a bucket - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt mode: delobj - name: Copy an object already stored in another bucket - amazon.aws.aws_s3: + amazon.aws.s3_object: bucket: mybucket object: /my/desired/key.txt mode: copy @@ -462,6 +492,9 @@ def bucket_check(module, s3, bucket, validate=True): def create_bucket(module, s3, bucket, location=None): + module.deprecate('Support for creating S3 buckets using the s3_object module' + ' has been deprecated. Please use the ``s3_bucket`` module' + ' instead.', version='6.0.0', collection_name='amazon.aws') if module.check_mode: module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) configuration = {} @@ -518,6 +551,9 @@ def list_keys(module, s3, bucket, prefix, marker, max_keys): def delete_bucket(module, s3, bucket): + module.deprecate('Support for deleting S3 buckets using the s3_object module' + ' has been deprecated. Please use the ``s3_bucket`` module' + ' instead.', version='6.0.0', collection_name='amazon.aws') if module.check_mode: module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) try: @@ -900,6 +936,10 @@ def ensure_tags(client, module, bucket, obj): def main(): + # Beware: this module uses an action plugin (plugins/action/s3_object.py) + # so that src parameter can be either in 'files/' lookup path on the + # controller, *or* on the remote host that the task is executed on. 
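+    # Keeping this working across the rename relies on meta/runtime.yml carrying + # a redirect for *both* the module and the action plugin (see plugin_routing).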
+ argument_spec = dict( bucket=dict(required=True), dest=dict(default=None, type='path'), diff --git a/tests/integration/targets/aws_s3/aliases b/tests/integration/targets/s3_object/aliases similarity index 55% rename from tests/integration/targets/aws_s3/aliases rename to tests/integration/targets/s3_object/aliases index 4ef4b2067d0..fbc8c8e2d2d 100644 --- a/tests/integration/targets/aws_s3/aliases +++ b/tests/integration/targets/s3_object/aliases @@ -1 +1,3 @@ cloud/aws + +aws_s3 diff --git a/tests/integration/targets/aws_s3/defaults/main.yml b/tests/integration/targets/s3_object/defaults/main.yml similarity index 100% rename from tests/integration/targets/aws_s3/defaults/main.yml rename to tests/integration/targets/s3_object/defaults/main.yml diff --git a/tests/integration/targets/aws_s3/files/hello.txt b/tests/integration/targets/s3_object/files/hello.txt similarity index 100% rename from tests/integration/targets/aws_s3/files/hello.txt rename to tests/integration/targets/s3_object/files/hello.txt diff --git a/tests/integration/targets/aws_s3/files/test.png b/tests/integration/targets/s3_object/files/test.png similarity index 100% rename from tests/integration/targets/aws_s3/files/test.png rename to tests/integration/targets/s3_object/files/test.png diff --git a/tests/integration/targets/aws_s3/meta/main.yml b/tests/integration/targets/s3_object/meta/main.yml similarity index 100% rename from tests/integration/targets/aws_s3/meta/main.yml rename to tests/integration/targets/s3_object/meta/main.yml diff --git a/tests/integration/targets/aws_s3/tasks/copy_object.yml b/tests/integration/targets/s3_object/tasks/copy_object.yml similarity index 96% rename from tests/integration/targets/aws_s3/tasks/copy_object.yml rename to tests/integration/targets/s3_object/tasks/copy_object.yml index 18a3e9eb11d..aff38eba1bc 100644 --- a/tests/integration/targets/aws_s3/tasks/copy_object.yml +++ b/tests/integration/targets/s3_object/tasks/copy_object.yml @@ -6,7 +6,7 @@ dst: "{{ bucket_name }}-copydst" - name: create bucket source - aws_s3: + s3_object: bucket: "{{ copy_bucket.src }}" mode: create @@ -15,7 +15,7 @@ content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" - name: Put a content in the source bucket - aws_s3: + s3_object: bucket: "{{ copy_bucket.src }}" mode: put content: "{{ content }}" @@ -29,7 +29,7 @@ until: "put_result.msg == 'PUT operation complete'" - name: Copy the content of the source bucket into dest bucket - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: copy object: destination.txt @@ -38,7 +38,7 @@ object: source.txt - name: Get the content copied into {{ copy_bucket.dst }} - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: getstr object: destination.txt @@ -50,7 +50,7 @@ - content == copy_content.contents - name: Get the download url for object copied into {{ copy_bucket.dst }} - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: geturl object: destination.txt @@ -62,7 +62,7 @@ - put_result.tags == copy_url.tags - name: Copy the same content from the source bucket into dest bucket (idempotency) - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: copy object: destination.txt @@ -78,7 +78,7 @@ - "copy_idempotency.msg == 'ETag from source and destination are the same'" - name: Copy object with tags - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: copy object: destination.txt @@ -96,7 +96,7 @@ - copy_result.tags['ansible_release'] == '2.0.1' - name: Copy object with tags (idempotency) - 
aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: copy object: destination.txt @@ -113,7 +113,7 @@ - copy_result is not changed - name: Copy from unexisting key should not succeed - aws_s3: + s3_object: bucket: "{{ copy_bucket.dst }}" mode: copy object: missing_key.txt diff --git a/tests/integration/targets/aws_s3/tasks/delete_bucket.yml b/tests/integration/targets/s3_object/tasks/delete_bucket.yml similarity index 87% rename from tests/integration/targets/aws_s3/tasks/delete_bucket.yml rename to tests/integration/targets/s3_object/tasks/delete_bucket.yml index bb6bca52ff8..bf6721f2c56 100644 --- a/tests/integration/targets/aws_s3/tasks/delete_bucket.yml +++ b/tests/integration/targets/s3_object/tasks/delete_bucket.yml @@ -1,24 +1,24 @@ - name: delete bucket at the end of Integration tests block: - name: list bucket object - aws_s3: + s3_object: bucket: "{{ item }}" mode: list register: objects ignore_errors: true - name: remove objects from bucket - aws_s3: + s3_object: bucket: "{{ item }}" mode: delobj object: "{{ obj }}" with_items: "{{ objects.s3_keys }}" loop_control: - loop_var: obj + loop_var: obj ignore_errors: true - name: delete the bucket - aws_s3: + s3_object: bucket: "{{ item }}" mode: delete ignore_errors: yes diff --git a/tests/integration/targets/aws_s3/tasks/main.yml b/tests/integration/targets/s3_object/tasks/main.yml similarity index 93% rename from tests/integration/targets/aws_s3/tasks/main.yml rename to tests/integration/targets/s3_object/tasks/main.yml index ec69fa25062..d6175e042e8 100644 --- a/tests/integration/targets/aws_s3/tasks/main.yml +++ b/tests/integration/targets/s3_object/tasks/main.yml @@ -1,5 +1,5 @@ --- -# Integration tests for aws_s3 +# Integration tests for s3_object - module_defaults: group/aws: aws_access_key: "{{ aws_access_key }}" @@ -28,7 +28,7 @@ - name: test create bucket without permissions module_defaults: { group/aws: {} } - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: create register: result @@ -40,7 +40,7 @@ - "result.msg != 'MODULE FAILURE'" - name: test create bucket with an invalid name - aws_s3: + s3_object: bucket: "{{ bucket_name }}-" mode: create register: result @@ -51,7 +51,7 @@ - result is failed - name: test create bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: create register: result @@ -61,7 +61,7 @@ - result is changed - name: trying to create a bucket name that already exists - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: create register: result @@ -82,7 +82,7 @@ register: upload_file - name: test putting an object in the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -96,8 +96,8 @@ - result is changed - result.msg == "PUT operation complete" - - name: test using aws_s3 with async - aws_s3: + - name: test using s3_object with async + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -114,7 +114,7 @@ retries: 10 - name: test put with overwrite=different and unmodified object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -129,11 +129,30 @@ - result is not changed - name: check that roles file lookups work as expected + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: hello.txt + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + # s3_object (and its old alias) use an action plugin to support using the + # 'file' lookup 
path or a remote path. Keeping this working is dependent on + # having a redirect for both the module and the action plugin + - name: check that roles file lookups work as expected when using old name aws_s3: bucket: "{{ bucket_name }}" mode: put src: hello.txt object: delete.txt + overwrite: always retries: 3 delay: 3 register: result @@ -144,7 +163,7 @@ - result.msg == "PUT operation complete" - name: test put with overwrite=never - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -159,7 +178,7 @@ - result is not changed - name: test put with overwrite=different and modified object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -174,7 +193,7 @@ - result is changed - name: test put with overwrite=always - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -189,7 +208,7 @@ - result is changed - name: test get object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -210,7 +229,7 @@ - upload_file.stat.checksum == download_file.stat.checksum - name: test get with overwrite=different and identical files - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -230,7 +249,7 @@ src: hello.txt - name: test get with overwrite=never - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -245,7 +264,7 @@ - result is not changed - name: test get with overwrite=different and modified file - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -260,7 +279,7 @@ - result is changed - name: test get with overwrite=always - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -275,7 +294,7 @@ - result is changed - name: test get with overwrite=latest and identical files - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -293,7 +312,7 @@ shell: touch -mt 197001010900.00 "{{ tmpdir.path }}/download.txt" - name: test get with overwrite=latest and files that mtimes are different - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -308,7 +327,7 @@ - result is changed - name: test geturl of the object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: geturl object: delete.txt @@ -323,7 +342,7 @@ - result is changed - name: test getstr of the object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: getstr object: delete.txt @@ -337,7 +356,7 @@ - result.contents == content - name: test list to get all objects in the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: list retries: 3 @@ -350,7 +369,7 @@ - result.msg == "LIST operation complete" - name: test delobj to just delete an object in the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: delobj object: delete.txt @@ -364,7 +383,7 @@ - result is changed - name: test putting an encrypted object in the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -380,7 +399,7 @@ - result.msg == "PUT operation complete" - name: test get encrypted object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download_encrypted.txt" @@ -401,7 +420,7 @@ - upload_file.stat.checksum == download_file.stat.checksum - name: delete encrypted file - aws_s3: + s3_object: 
bucket: "{{ bucket_name }}" mode: delobj object: delete_encrypt.txt @@ -409,7 +428,7 @@ delay: 3 - name: test putting an aws:kms encrypted object in the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -426,7 +445,7 @@ - result.msg == "PUT operation complete" - name: test get KMS encrypted object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download_kms.txt" @@ -449,7 +468,7 @@ # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted - name: delete KMS encrypted file - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: delobj object: delete_encrypt_kms.txt @@ -461,7 +480,7 @@ # PRs exist for that, but propose deferring until after merge. - name: test creation of empty path - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: create object: foo/bar/baz/ @@ -475,7 +494,7 @@ - result is changed - name: test deletion of empty path - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: delobj object: foo/bar/baz/ @@ -483,7 +502,7 @@ delay: 3 - name: test delete bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: delete register: result @@ -496,7 +515,7 @@ - result is changed - name: test create a bucket with a dot in the name - aws_s3: + s3_object: bucket: "{{ bucket_name_with_dot }}" mode: create register: result @@ -506,7 +525,7 @@ - result is changed - name: test delete a bucket with a dot in the name - aws_s3: + s3_object: bucket: "{{ bucket_name_with_dot }}" mode: delete register: result @@ -516,7 +535,7 @@ - result is changed - name: test delete a nonexistent bucket - aws_s3: + s3_object: bucket: "{{ bucket_name_with_dot }}" mode: delete register: result @@ -538,19 +557,19 @@ - name: test multipart download - platform specific block: - name: make a bucket to upload the file - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: create - name: upload the file to the bucket - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ tmpdir.path }}/largefile" object: multipart.txt - name: download file once - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -566,7 +585,7 @@ - result is changed - name: download file again - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download.txt" @@ -591,7 +610,7 @@ - bucket_with_policy is changed - name: fail to upload the file to the bucket with an ACL - aws_s3: + s3_object: bucket: "{{ bucket_name_acl }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -607,7 +626,7 @@ # - upload_private is failed - name: upload the file to the bucket with an ACL - aws_s3: + s3_object: bucket: "{{ bucket_name_acl }}" mode: put src: "{{ tmpdir.path }}/upload.txt" @@ -621,7 +640,7 @@ - upload_owner is changed - name: create an object from static content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -634,7 +653,7 @@ - result is changed - name: ensure idempotency on static content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -648,7 +667,7 @@ - result is not changed - name: fetch test content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: getstr object: put-content.txt @@ -662,7 +681,7 @@ put_template_text: test template - name: create an object from a template - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-template.txt mode: put @@ -674,7 +693,7 @@ - result is changed - name: fetch template content - aws_s3: + 
s3_object: bucket: "{{ bucket_name }}" mode: getstr object: put-template.txt @@ -690,7 +709,7 @@ register: put_binary - name: create an object from binary data - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-binary.bin mode: put @@ -702,7 +721,7 @@ - result is changed - name: fetch binary content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ tmpdir.path }}/download_binary.bin" @@ -724,12 +743,12 @@ - include_tasks: copy_object.yml - # ============================================================ + # ============================================================ - name: 'Run tagging tests' block: - # ============================================================ + # ============================================================ - name: create an object from static content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -749,7 +768,7 @@ - result.tags["Tag Two"] == 'two {{ resource_prefix }}' - name: ensure idempotency on static content - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -770,7 +789,7 @@ - result.tags["Tag Two"] == 'two {{ resource_prefix }}' - name: Remove a tag from an S3 object - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -789,7 +808,7 @@ - "'Tag Two' not in result.tags" - name: Remove the tag from an S3 object (idempotency) - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -807,9 +826,9 @@ - (result.tags | length) == 1 - result.tags["tag_one"] == "{{ resource_prefix }} One" - "'Tag Two' not in result.tags" - + - name: Add a tag for an S3 object with purge_tags False - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -830,7 +849,7 @@ - result.tags["tag_one"] == '{{ resource_prefix }} One' - name: Add a tag for an S3 object with purge_tags False (idempotency) - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -849,9 +868,9 @@ - (result.tags | length) == 2 - result.tags["tag_three"] == '{{ resource_prefix }} Three' - result.tags["tag_one"] == '{{ resource_prefix }} One' - + - name: Update tags for an S3 object with purge_tags False - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -873,7 +892,7 @@ - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - name: Update tags for an S3 object with purge_tags False (idempotency) - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -893,9 +912,9 @@ - result.tags["tag_one"] == '{{ resource_prefix }} One' - result.tags["tag_three"] == '{{ resource_prefix }} Three' - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - + - name: Specify empty tags for an S3 object with purge_tags False - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -914,9 +933,9 @@ - result.tags["tag_one"] == '{{ resource_prefix }} One' - result.tags["tag_three"] == '{{ resource_prefix }} Three' - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - + - name: Do not specify any tag to ensure previous tags are not removed - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -933,9 +952,9 @@ - result.tags["tag_one"] == '{{ resource_prefix }} One' - result.tags["tag_three"] == '{{ resource_prefix }} Three' - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - + - name: Remove all tags - aws_s3: + s3_object: bucket: "{{ bucket_name }}" 
object: put-content.txt mode: put @@ -950,9 +969,9 @@ - result is changed - "'tags' in result" - (result.tags | length) == 0 - + - name: Remove all tags (idempotency) - aws_s3: + s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -967,7 +986,7 @@ - result is not changed - "'tags' in result" - (result.tags | length) == 0 - + always: - name: delete temporary files diff --git a/tests/integration/targets/aws_s3/templates/policy.json.j2 b/tests/integration/targets/s3_object/templates/policy.json.j2 similarity index 100% rename from tests/integration/targets/aws_s3/templates/policy.json.j2 rename to tests/integration/targets/s3_object/templates/policy.json.j2 diff --git a/tests/integration/targets/aws_s3/templates/put-template.txt.j2 b/tests/integration/targets/s3_object/templates/put-template.txt.j2 similarity index 100% rename from tests/integration/targets/aws_s3/templates/put-template.txt.j2 rename to tests/integration/targets/s3_object/templates/put-template.txt.j2
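
A minimal migration sketch (an illustration, not part of the patch itself): with the deprecation above, bucket creation and deletion move to the amazon.aws.s3_bucket module, while object operations continue through the renamed amazon.aws.s3_object. The bucket and file names below are placeholders, and the s3_bucket parameters shown (name, state, force) are assumed from that module's documentation.

- name: Create the bucket (replaces s3_object's deprecated mode=create)
  amazon.aws.s3_bucket:
    name: mybucket
    state: present

- name: Upload an object via the renamed module
  amazon.aws.s3_object:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put

- name: Remove the bucket and its contents (replaces s3_object's deprecated mode=delete)
  amazon.aws.s3_bucket:
    name: mybucket
    state: absent
    force: true  # assumed flag: empties the bucket before deleting it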