From ec192578e0ccc54bb9d16e79101ca93e51d9ceb1 Mon Sep 17 00:00:00 2001 From: Elad Ben-Israel Date: Wed, 24 Oct 2018 16:08:40 +0300 Subject: [PATCH] fix(aws-s3-deployment): avoid deletion during update using physical ids When a BucketDeployment resource is created, a unique physical ID is generated and returned via `ResourcePhysicalId`. The same ID will then be used in update/delete. This tells CloudFormation not to issue a DELETE operation after an UPDATE, which was the cause for #981. Also, allow destination prefix to be "/", which is the same as not specifying a prefix. Fixes #981 --- .../aws-s3-deployment/lambda/src/index.py | 35 ++++- .../aws-s3-deployment/lambda/test/test.py | 145 +++++++++++++++--- .../lib/bucket-deployment.ts | 16 +- .../@aws-cdk/aws-s3-deployment/lib/source.ts | 13 +- .../integ.bucket-deployment.expected.json | 5 +- .../test/integ.bucket-deployment.ts | 3 +- packages/@aws-cdk/cdk/package-lock.json | 2 +- 7 files changed, 183 insertions(+), 36 deletions(-) diff --git a/packages/@aws-cdk/aws-s3-deployment/lambda/src/index.py b/packages/@aws-cdk/aws-s3-deployment/lambda/src/index.py index eb4c9a0bcc155..64759acd21a46 100644 --- a/packages/@aws-cdk/aws-s3-deployment/lambda/src/index.py +++ b/packages/@aws-cdk/aws-s3-deployment/lambda/src/index.py @@ -5,6 +5,8 @@ import json import traceback import logging +from uuid import uuid4 + from botocore.vendored import requests from zipfile import ZipFile @@ -17,7 +19,7 @@ def handler(event, context): def cfn_error(message=None): - logger.info("| cfn_error: %s" % message) + logger.error("| cfn_error: %s" % message) cfn_send(event, context, CFN_FAILED, reason=message) try: @@ -29,41 +31,64 @@ def cfn_error(message=None): # extract resource properties props = event['ResourceProperties'] old_props = event.get('OldResourceProperties', {}) + physical_id = event.get('PhysicalResourceId', None) try: source_bucket_name = props['SourceBucketName'] source_object_key = props['SourceObjectKey'] dest_bucket_name = 
props['DestinationBucketName'] dest_bucket_prefix = props.get('DestinationBucketKeyPrefix', '') - retain_on_delete = props.get('RetainOnDelete', "false") == "true" + retain_on_delete = props.get('RetainOnDelete', "true") == "true" except KeyError as e: - cfn_error("missing request resource property %s" % str(e)) + cfn_error("missing request resource property %s. props: %s" % (str(e), props)) return + # treat "/" as if no prefix was specified + if dest_bucket_prefix == "/": + dest_bucket_prefix = "" + s3_source_zip = "s3://%s/%s" % (source_bucket_name, source_object_key) s3_dest = "s3://%s/%s" % (dest_bucket_name, dest_bucket_prefix) old_s3_dest = "s3://%s/%s" % (old_props.get("DestinationBucketName", ""), old_props.get("DestinationBucketKeyPrefix", "")) + + # "s3:///" means the old properties carried no destination, so treat it as if there was no old destination + if old_s3_dest == "s3:///": + old_s3_dest = None + logger.info("| s3_dest: %s" % s3_dest) logger.info("| old_s3_dest: %s" % old_s3_dest) + # if we are creating a new resource, allocate a physical id for it + # otherwise, we expect physical id to be relayed by cloudformation + if request_type == "Create": + physical_id = "aws.cdk.s3deployment.%s" % str(uuid4()) + else: + if not physical_id: + cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type) + return + # delete or create/update (only if "retain_on_delete" is false) if request_type == "Delete" and not retain_on_delete: aws_command("s3", "rm", s3_dest, "--recursive") # if we are updating without retention and the destination changed, delete first if request_type == "Update" and not retain_on_delete and old_s3_dest != s3_dest: + if not old_s3_dest: + logger.warn("cannot delete old resource without old resource properties") + return + aws_command("s3", "rm", old_s3_dest, "--recursive") if request_type == "Update" or request_type == "Create": s3_deploy(s3_source_zip, s3_dest) - cfn_send(event, context, CFN_SUCCESS) + cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id) 
except KeyError as e: cfn_error("invalid request. Missing key %s" % str(e)) except Exception as e: logger.exception(e) - cfn_error() + cfn_error(str(e)) #--------------------------------------------------------------------------------------------------- # populate all files from s3_source_zip to a destination bucket diff --git a/packages/@aws-cdk/aws-s3-deployment/lambda/test/test.py b/packages/@aws-cdk/aws-s3-deployment/lambda/test/test.py index b5c801b5959f7..17dd4c1f67502 100644 --- a/packages/@aws-cdk/aws-s3-deployment/lambda/test/test.py +++ b/packages/@aws-cdk/aws-s3-deployment/lambda/test/test.py @@ -12,7 +12,6 @@ class TestHandler(unittest.TestCase): def setUp(self): logger = logging.getLogger() - logger.addHandler(logging.NullHandler()) # clean up old aws.out file (from previous runs) try: os.remove("aws.out") @@ -20,7 +19,7 @@ def setUp(self): def test_invalid_request(self): resp = invoke_handler("Create", {}, expected_status="FAILED") - self.assertEqual(resp["Reason"], "missing request resource property 'SourceBucketName'") + self.assertEqual(resp["Reason"], "missing request resource property 'SourceBucketName'. 
props: {}") def test_create_update(self): invoke_handler("Create", { @@ -34,6 +33,20 @@ def test_create_update(self): "s3 sync --delete contents.zip s3:///" ) + def test_create_with_backslash_prefix_same_as_no_prefix(self): + invoke_handler("Create", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + "DestinationBucketKeyPrefix": "/" + }) + + self.assertAwsCommands( + "s3 cp s3:/// archive.zip", + "s3 sync --delete contents.zip s3:///" + ) + + def test_create_update_with_dest_key(self): invoke_handler("Create", { "SourceBucketName": "", @@ -47,12 +60,13 @@ def test_create_update_with_dest_key(self): "s3 sync --delete contents.zip s3:///" ) - def test_delete(self): + def test_delete_no_retain(self): invoke_handler("Delete", { "SourceBucketName": "", "SourceObjectKey": "", - "DestinationBucketName": "" - }) + "DestinationBucketName": "", + "RetainOnDelete": "false" + }, physical_id="") self.assertAwsCommands("s3 rm s3:/// --recursive") @@ -61,18 +75,30 @@ def test_delete_with_dest_key(self): "SourceBucketName": "", "SourceObjectKey": "", "DestinationBucketName": "", - "DestinationBucketKeyPrefix": "" - }) + "DestinationBucketKeyPrefix": "", + "RetainOnDelete": "false" + }, physical_id="") self.assertAwsCommands("s3 rm s3:/// --recursive") - def test_delete_with_retain(self): + def test_delete_with_retain_explicit(self): invoke_handler("Delete", { "SourceBucketName": "", "SourceObjectKey": "", "DestinationBucketName": "", "RetainOnDelete": "true" - }) + }, physical_id="") + + # no aws commands (retain) + self.assertAwsCommands() + + # RetainOnDelete=true is the default + def test_delete_with_retain_implicit_default(self): + invoke_handler("Delete", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "" + }, physical_id="") # no aws commands (retain) self.assertAwsCommands() @@ -83,7 +109,7 @@ def test_delete_with_retain_explicitly_false(self): "SourceObjectKey": "", "DestinationBucketName": "", 
"RetainOnDelete": "false" - }) + }, physical_id="") self.assertAwsCommands( "s3 rm s3:/// --recursive" @@ -100,7 +126,7 @@ def test_update_same_dest(self): "DestinationBucketName": "", }, old_resource_props={ "DestinationBucketName": "", - }) + }, physical_id="") self.assertAwsCommands( "s3 cp s3:/// archive.zip", @@ -115,23 +141,24 @@ def test_update_new_dest_retain(self): }, old_resource_props={ "DestinationBucketName": "", "RetainOnDelete": "true" - }) + }, physical_id="") self.assertAwsCommands( "s3 cp s3:/// archive.zip", "s3 sync --delete contents.zip s3:///" ) - def test_update_new_dest_no_retain_explicit(self): + def test_update_new_dest_no_retain(self): invoke_handler("Update", { "SourceBucketName": "", "SourceObjectKey": "", "DestinationBucketName": "", + "RetainOnDelete": "false" }, old_resource_props={ "DestinationBucketName": "", "DestinationBucketKeyPrefix": "", "RetainOnDelete": "false" - }) + }, physical_id="") self.assertAwsCommands( "s3 rm s3:/// --recursive", @@ -139,7 +166,7 @@ def test_update_new_dest_no_retain_explicit(self): "s3 sync --delete contents.zip s3:///" ) - def test_update_new_dest_no_retain_implicit(self): + def test_update_new_dest_retain_implicit(self): invoke_handler("Update", { "SourceBucketName": "", "SourceObjectKey": "", @@ -147,23 +174,24 @@ def test_update_new_dest_no_retain_implicit(self): }, old_resource_props={ "DestinationBucketName": "", "DestinationBucketKeyPrefix": "" - }) + }, physical_id="") self.assertAwsCommands( - "s3 rm s3:/// --recursive", "s3 cp s3:/// archive.zip", "s3 sync --delete contents.zip s3:///" ) - def test_update_new_dest_prefix_no_retain_implicit(self): + def test_update_new_dest_prefix_no_retain(self): invoke_handler("Update", { "SourceBucketName": "", "SourceObjectKey": "", "DestinationBucketName": "", - "DestinationBucketKeyPrefix": "" + "DestinationBucketKeyPrefix": "", + "RetainOnDelete": "false" }, old_resource_props={ "DestinationBucketName": "", - }) + "RetainOnDelete": "false" + }, 
physical_id="") self.assertAwsCommands( "s3 rm s3:/// --recursive", @@ -171,6 +199,78 @@ def test_update_new_dest_prefix_no_retain_implicit(self): "s3 sync --delete contents.zip s3:///" ) + def test_update_new_dest_prefix_retain_implicit(self): + invoke_handler("Update", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + "DestinationBucketKeyPrefix": "" + }, old_resource_props={ + "DestinationBucketName": "", + }, physical_id="") + + self.assertAwsCommands( + "s3 cp s3:/// archive.zip", + "s3 sync --delete contents.zip s3:///" + ) + + # + # physical id + # + + def test_physical_id_allocated_on_create_and_reused_afterwards(self): + create_resp = invoke_handler("Create", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + }) + + phid = create_resp['PhysicalResourceId'] + self.assertTrue(phid.startswith('aws.cdk.s3deployment')) + + # now issue an update and pass in the physical id. expect the same + # one to be returned back + update_resp = invoke_handler("Update", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + }, old_resource_props={ + "DestinationBucketName": "", + }, physical_id=phid) + self.assertEqual(update_resp['PhysicalResourceId'], phid) + + # now issue a delete, and make sure this also applies + delete_resp = invoke_handler("Delete", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + "RetainOnDelete": "false" + }, physical_id=phid) + self.assertEqual(delete_resp['PhysicalResourceId'], phid) + + def test_fails_when_physical_id_not_present_in_update(self): + update_resp = invoke_handler("Update", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + }, old_resource_props={ + "DestinationBucketName": "", + }, expected_status="FAILED") + + self.assertEqual(update_resp['Reason'], "invalid request: request type is 'Update' but 'PhysicalResourceId' is not defined") + + def 
test_fails_when_physical_id_not_present_in_delete(self): + update_resp = invoke_handler("Delete", { + "SourceBucketName": "", + "SourceObjectKey": "", + "DestinationBucketName": "", + }, old_resource_props={ + "DestinationBucketName": "", + }, expected_status="FAILED") + + self.assertEqual(update_resp['Reason'], "invalid request: request type is 'Delete' but 'PhysicalResourceId' is not defined") + + # asserts that a given list of "aws xxx" commands have been invoked (in order) def assertAwsCommands(self, *expected): actual = read_aws_out() @@ -193,7 +293,7 @@ def read_aws_out(): # requestType: CloudFormation request type ("Create", "Update", "Delete") # resourceProps: map to pass to "ResourceProperties" # expected_status: "SUCCESS" or "FAILED" -def invoke_handler(requestType, resourceProps, old_resource_props=None, expected_status='SUCCESS'): +def invoke_handler(requestType, resourceProps, old_resource_props=None, physical_id=None, expected_status='SUCCESS'): response_url = '' event={ @@ -208,6 +308,9 @@ def invoke_handler(requestType, resourceProps, old_resource_props=None, expected if old_resource_props: event['OldResourceProperties'] = old_resource_props + if physical_id: + event['PhysicalResourceId'] = physical_id + class ContextMock: log_stream_name = 'log_stream' class ResponseMock: reason = 'OK' diff --git a/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts b/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts index 396280b8b0c48..a438af1a8b682 100644 --- a/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts +++ b/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts @@ -19,16 +19,22 @@ export interface BucketDeploymentProps { destinationBucket: s3.BucketRef; /** - * Key prefix in desination. - * @default No prefix (source == dest) + * Key prefix in the destination bucket. 
+ * + * @default "/" (unzip to root of the destination bucket) */ destinationKeyPrefix?: string; /** - * If this is enabled, files in destination bucket/prefix will not be deleted - * when the resource is deleted or removed from the stack. + * If this is set to "false", the destination files will be deleted when the + * resource is deleted or the destination is updated. + * + * NOTICE: if this is set to "false" and destination bucket/prefix is updated, + * all files in the previous destination will first be deleted and then + * uploaded to the new destination location. This could have availability + * implications on your users. * - * @default false (when resource is deleted, files are deleted) + * @default true - when resource is deleted/updated, files are retained */ retainOnDelete?: boolean; } diff --git a/packages/@aws-cdk/aws-s3-deployment/lib/source.ts b/packages/@aws-cdk/aws-s3-deployment/lib/source.ts index 6d20c7673f368..9df095c59722f 100644 --- a/packages/@aws-cdk/aws-s3-deployment/lib/source.ts +++ b/packages/@aws-cdk/aws-s3-deployment/lib/source.ts @@ -4,10 +4,20 @@ import cdk = require('@aws-cdk/cdk'); import fs = require('fs'); export interface SourceProps { + /** + * The source bucket to deploy from. + */ bucket: s3.BucketRef; + + /** + * An S3 object key in the source bucket that points to a zip file. + */ zipObjectKey: string; } +/** + * Represents a source for bucket deployments. + */ export interface ISource { /** * Binds the source to a bucket deployment. 
@@ -22,7 +32,8 @@ export interface ISource { * Usage: * * Source.bucket(bucket, key) - * Source.asset('/path/to/asset') + * Source.asset('/local/path/to/directory') + * Source.asset('/local/path/to/a/file.zip') * */ export class Source { diff --git a/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.expected.json b/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.expected.json index 5fcdf8058d375..1d243d1c22759 100644 --- a/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.expected.json +++ b/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.expected.json @@ -87,7 +87,8 @@ }, "DestinationBucketName": { "Ref": "Destination920A3C57" - } + }, + "RetainOnDelete": false } }, "CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRole89A01265": { @@ -423,4 +424,4 @@ "Description": "S3 key for asset version \"test-bucket-deployments-1/DeployWithPrefix/Asset\"" } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.ts b/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.ts index 4647741828a85..5e5fd54244db4 100644 --- a/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.ts +++ b/packages/@aws-cdk/aws-s3-deployment/test/integ.bucket-deployment.ts @@ -15,6 +15,7 @@ class TestBucketDeployment extends cdk.Stack { new s3deploy.BucketDeployment(this, 'DeployMe', { source: s3deploy.Source.asset(path.join(__dirname, 'my-website')), destinationBucket, + retainOnDelete: false, // default is true, which will block the integration test cleanup }); const bucket2 = new s3.Bucket(this, 'Destination2'); @@ -23,7 +24,7 @@ class TestBucketDeployment extends cdk.Stack { source: s3deploy.Source.asset(path.join(__dirname, 'my-website')), destinationBucket: bucket2, destinationKeyPrefix: 'deploy/here/', - retainOnDelete: false, // this is the default + retainOnDelete: false, // default is true, which will block the integration test 
cleanup }); } } diff --git a/packages/@aws-cdk/cdk/package-lock.json b/packages/@aws-cdk/cdk/package-lock.json index c1a70b7ca03d0..524ff15421f4b 100644 --- a/packages/@aws-cdk/cdk/package-lock.json +++ b/packages/@aws-cdk/cdk/package-lock.json @@ -1,6 +1,6 @@ { "name": "@aws-cdk/cdk", - "version": "0.12.0", + "version": "0.13.0", "lockfileVersion": 1, "requires": true, "dependencies": {