fix(aws-s3-deployment): avoid deletion during update using physical ids #1006

Merged: 1 commit, Oct 25, 2018
35 changes: 30 additions & 5 deletions packages/@aws-cdk/aws-s3-deployment/lambda/src/index.py
@@ -5,6 +5,8 @@
import json
import traceback
import logging
from uuid import uuid4

from botocore.vendored import requests
from zipfile import ZipFile

@@ -17,7 +19,7 @@
def handler(event, context):

def cfn_error(message=None):
- logger.info("| cfn_error: %s" % message)
+ logger.error("| cfn_error: %s" % message)
cfn_send(event, context, CFN_FAILED, reason=message)

try:
@@ -29,41 +31,64 @@ def cfn_error(message=None):
# extract resource properties
props = event['ResourceProperties']
old_props = event.get('OldResourceProperties', {})
physical_id = event.get('PhysicalResourceId', None)

try:
source_bucket_name = props['SourceBucketName']
source_object_key = props['SourceObjectKey']
dest_bucket_name = props['DestinationBucketName']
dest_bucket_prefix = props.get('DestinationBucketKeyPrefix', '')
- retain_on_delete = props.get('RetainOnDelete', "false") == "true"
+ retain_on_delete = props.get('RetainOnDelete', "true") == "true"
except KeyError as e:
cfn_error("missing request resource property %s" % str(e))
cfn_error("missing request resource property %s. props: %s" % (str(e), props))
return

# treat "/" as if no prefix was specified
if dest_bucket_prefix == "/":
dest_bucket_prefix = ""

s3_source_zip = "s3://%s/%s" % (source_bucket_name, source_object_key)
s3_dest = "s3://%s/%s" % (dest_bucket_name, dest_bucket_prefix)

old_s3_dest = "s3://%s/%s" % (old_props.get("DestinationBucketName", ""), old_props.get("DestinationBucketKeyPrefix", ""))

# an empty old destination ("s3:///") means old props were not provided, so there is nothing to delete
if old_s3_dest == "s3:///":
old_s3_dest = None

logger.info("| s3_dest: %s" % s3_dest)
logger.info("| old_s3_dest: %s" % old_s3_dest)

# if we are creating a new resource, allocate a physical id for it
# otherwise, we expect physical id to be relayed by cloudformation
if request_type == "Create":
physical_id = "aws.cdk.s3deployment.%s" % str(uuid4())
rix0rrr (Contributor) commented on Oct 24, 2018:

Why not just use the target S3 bucket location as the physical ID? That expresses exactly what you want to achieve (if someone changes the target, you'll automatically get a DELETE on the old deployment).

Author replied:

The destination is mutable, and I have code that handles updates to it by first deleting the old destination and then uploading to the new one (and not the opposite). This addresses the situation where, for example, the old destination is a parent directory of the new destination.

else:
if not physical_id:
cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type)
return

# delete or create/update (only if "retain_on_delete" is false)
if request_type == "Delete" and not retain_on_delete:
aws_command("s3", "rm", s3_dest, "--recursive")

# if we are updating without retention and the destination changed, delete first
if request_type == "Update" and not retain_on_delete and old_s3_dest != s3_dest:
if not old_s3_dest:
logger.warn("cannot delete old resource without old resource properties")
return

aws_command("s3", "rm", old_s3_dest, "--recursive")

if request_type == "Update" or request_type == "Create":
s3_deploy(s3_source_zip, s3_dest)

- cfn_send(event, context, CFN_SUCCESS)
+ cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id)
except KeyError as e:
cfn_error("invalid request. Missing key %s" % str(e))
except Exception as e:
logger.exception(e)
- cfn_error()
+ cfn_error(str(e))

#---------------------------------------------------------------------------------------------------
# populate all files from s3_source_zip to a destination bucket
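The physical-id logic above is the heart of this fix: a stable id is allocated once on Create and relayed back on every subsequent Update/Delete, so changing the destination no longer causes CloudFormation to replace (and delete) the deployment resource. As a standalone sketch of that decision (the helper name and the sample id below are illustrative, not part of the handler):

from uuid import uuid4

def resolve_physical_id(event):
    # On Create, allocate a fresh physical id; on Update/Delete, CloudFormation
    # is expected to relay the id returned by the original Create response.
    request_type = event["RequestType"]
    physical_id = event.get("PhysicalResourceId", None)

    if request_type == "Create":
        return "aws.cdk.s3deployment.%s" % str(uuid4())

    if not physical_id:
        raise ValueError("invalid request: request type is '%s' but "
                         "'PhysicalResourceId' is not defined" % request_type)

    return physical_id

# Relaying the id allocated on Create keeps the physical id stable across
# updates, so the old deployment is cleaned up explicitly (the "s3 rm" above)
# rather than through a CloudFormation replace-and-delete cycle.
update_event = {"RequestType": "Update", "PhysicalResourceId": "aws.cdk.s3deployment.example"}
assert resolve_physical_id(update_event) == "aws.cdk.s3deployment.example"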
145 changes: 124 additions & 21 deletions packages/@aws-cdk/aws-s3-deployment/lambda/test/test.py
@@ -12,15 +12,14 @@
class TestHandler(unittest.TestCase):
def setUp(self):
logger = logging.getLogger()
- logger.addHandler(logging.NullHandler())

# clean up old aws.out file (from previous runs)
try: os.remove("aws.out")
except OSError: pass

def test_invalid_request(self):
resp = invoke_handler("Create", {}, expected_status="FAILED")
- self.assertEqual(resp["Reason"], "missing request resource property 'SourceBucketName'")
+ self.assertEqual(resp["Reason"], "missing request resource property 'SourceBucketName'. props: {}")

def test_create_update(self):
invoke_handler("Create", {
@@ -34,6 +33,20 @@ def test_create_update(self):
"s3 sync --delete contents.zip s3://<dest-bucket-name>/"
)

def test_create_with_backslash_prefix_same_as_no_prefix(self):
invoke_handler("Create", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"DestinationBucketKeyPrefix": "/"
})

self.assertAwsCommands(
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<dest-bucket-name>/"
)


def test_create_update_with_dest_key(self):
invoke_handler("Create", {
"SourceBucketName": "<source-bucket>",
@@ -47,12 +60,13 @@ def test_create_update_with_dest_key(self):
"s3 sync --delete contents.zip s3://<dest-bucket-name>/<dest-key-prefix>"
)

- def test_delete(self):
+ def test_delete_no_retain(self):
invoke_handler("Delete", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>"
})
"DestinationBucketName": "<dest-bucket-name>",
"RetainOnDelete": "false"
}, physical_id="<physicalid>")

self.assertAwsCommands("s3 rm s3://<dest-bucket-name>/ --recursive")

@@ -61,18 +75,30 @@ def test_delete_with_dest_key(self):
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"DestinationBucketKeyPrefix": "<dest-key-prefix>"
})
"DestinationBucketKeyPrefix": "<dest-key-prefix>",
"RetainOnDelete": "false"
}, physical_id="<physicalid>")

self.assertAwsCommands("s3 rm s3://<dest-bucket-name>/<dest-key-prefix> --recursive")

- def test_delete_with_retain(self):
+ def test_delete_with_retain_explicit(self):
invoke_handler("Delete", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"RetainOnDelete": "true"
- })
+ }, physical_id="<physicalid>")

# no aws commands (retain)
self.assertAwsCommands()

# RetainOnDelete=true is the default
def test_delete_with_retain_implicit_default(self):
invoke_handler("Delete", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>"
}, physical_id="<physicalid>")

# no aws commands (retain)
self.assertAwsCommands()
@@ -83,7 +109,7 @@ def test_delete_with_retain_explicitly_false(self):
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"RetainOnDelete": "false"
- })
+ }, physical_id="<physicalid>")

self.assertAwsCommands(
"s3 rm s3://<dest-bucket-name>/ --recursive"
@@ -100,7 +126,7 @@ def test_update_same_dest(self):
"DestinationBucketName": "<dest-bucket-name>",
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
- })
+ }, physical_id="<physical-id>")

self.assertAwsCommands(
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
@@ -115,62 +141,136 @@ def test_update_new_dest_retain(self):
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
"RetainOnDelete": "true"
- })
+ }, physical_id="<physical-id>")

self.assertAwsCommands(
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<dest-bucket-name>/"
)

- def test_update_new_dest_no_retain_explicit(self):
+ def test_update_new_dest_no_retain(self):
invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<new-dest-bucket-name>",
"RetainOnDelete": "false"
}, old_resource_props={
"DestinationBucketName": "<old-dest-bucket-name>",
"DestinationBucketKeyPrefix": "<old-dest-prefix>",
"RetainOnDelete": "false"
- })
+ }, physical_id="<physical-id>")

self.assertAwsCommands(
"s3 rm s3://<old-dest-bucket-name>/<old-dest-prefix> --recursive",
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<new-dest-bucket-name>/"
)

- def test_update_new_dest_no_retain_implicit(self):
+ def test_update_new_dest_retain_implicit(self):
invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<new-dest-bucket-name>",
}, old_resource_props={
"DestinationBucketName": "<old-dest-bucket-name>",
"DestinationBucketKeyPrefix": "<old-dest-prefix>"
- })
+ }, physical_id="<physical-id>")

self.assertAwsCommands(
"s3 rm s3://<old-dest-bucket-name>/<old-dest-prefix> --recursive",
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<new-dest-bucket-name>/"
)

- def test_update_new_dest_prefix_no_retain_implicit(self):
+ def test_update_new_dest_prefix_no_retain(self):
invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"DestinationBucketKeyPrefix": "<new-dest-prefix>"
"DestinationBucketKeyPrefix": "<new-dest-prefix>",
"RetainOnDelete": "false"
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
- })
+ "RetainOnDelete": "false"
+ }, physical_id="<physical id>")

self.assertAwsCommands(
"s3 rm s3://<dest-bucket-name>/ --recursive",
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<dest-bucket-name>/<new-dest-prefix>"
)

def test_update_new_dest_prefix_retain_implicit(self):
invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"DestinationBucketKeyPrefix": "<new-dest-prefix>"
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
}, physical_id="<physical id>")

self.assertAwsCommands(
"s3 cp s3://<source-bucket>/<source-object-key> archive.zip",
"s3 sync --delete contents.zip s3://<dest-bucket-name>/<new-dest-prefix>"
)

#
# physical id
#

def test_physical_id_allocated_on_create_and_reused_afterwards(self):
create_resp = invoke_handler("Create", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
})

phid = create_resp['PhysicalResourceId']
self.assertTrue(phid.startswith('aws.cdk.s3deployment'))

# now issue an update and pass in the physical id. expect the same
# one to be returned back
update_resp = invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<new-dest-bucket-name>",
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
}, physical_id=phid)
self.assertEqual(update_resp['PhysicalResourceId'], phid)

# now issue a delete, and make sure this also applies
delete_resp = invoke_handler("Delete", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<dest-bucket-name>",
"RetainOnDelete": "false"
}, physical_id=phid)
self.assertEqual(delete_resp['PhysicalResourceId'], phid)

def test_fails_when_physical_id_not_present_in_update(self):
update_resp = invoke_handler("Update", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<new-dest-bucket-name>",
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
}, expected_status="FAILED")

self.assertEqual(update_resp['Reason'], "invalid request: request type is 'Update' but 'PhysicalResourceId' is not defined")

def test_fails_when_physical_id_not_present_in_delete(self):
update_resp = invoke_handler("Delete", {
"SourceBucketName": "<source-bucket>",
"SourceObjectKey": "<source-object-key>",
"DestinationBucketName": "<new-dest-bucket-name>",
}, old_resource_props={
"DestinationBucketName": "<dest-bucket-name>",
}, expected_status="FAILED")

self.assertEqual(update_resp['Reason'], "invalid request: request type is 'Delete' but 'PhysicalResourceId' is not defined")


# asserts that a given list of "aws xxx" commands have been invoked (in order)
def assertAwsCommands(self, *expected):
actual = read_aws_out()
@@ -193,7 +293,7 @@ def read_aws_out():
# requestType: CloudFormation request type ("Create", "Update", "Delete")
# resourceProps: map to pass to "ResourceProperties"
# expected_status: "SUCCESS" or "FAILED"
- def invoke_handler(requestType, resourceProps, old_resource_props=None, expected_status='SUCCESS'):
+ def invoke_handler(requestType, resourceProps, old_resource_props=None, physical_id=None, expected_status='SUCCESS'):
response_url = '<response-url>'

event={
@@ -208,6 +308,9 @@ def invoke_handler(requestType, resourceProps, old_resource_props=None, expected
if old_resource_props:
event['OldResourceProperties'] = old_resource_props

if physical_id:
event['PhysicalResourceId'] = physical_id

class ContextMock: log_stream_name = 'log_stream'
class ResponseMock: reason = 'OK'

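A note on the test harness: assertAwsCommands reads commands back from an aws.out file, which implies the handler's aws_command helper is swapped out for a fake during the run. The harness itself is not part of this diff; a minimal stand-in, assuming the handler module is importable as index, could look like this:

import index  # the handler module under test (assumed importable under this name)

def fake_aws_command(*args):
    # Record the would-be CLI invocation instead of shelling out to the real
    # AWS CLI, so assertAwsCommands can read the commands back in order.
    with open("aws.out", "a") as f:
        f.write(" ".join(args) + "\n")

index.aws_command = fake_aws_command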
16 changes: 11 additions & 5 deletions packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts
@@ -19,16 +19,22 @@ export interface BucketDeploymentProps {
destinationBucket: s3.BucketRef;

/**
- * Key prefix in desination.
- * @default No prefix (source == dest)
+ * Key prefix in the destination bucket.
+ *
+ * @default "/" (unzip to root of the destination bucket)
*/
destinationKeyPrefix?: string;

/**
- * If this is enabled, files in destination bucket/prefix will not be deleted
- * when the resource is deleted or removed from the stack.
+ * If this is set to "false", the destination files will be deleted when the
+ * resource is deleted or the destination is updated.
*
+ * NOTICE: if this is set to "false" and the destination bucket/prefix is updated,
+ * all files in the previous destination will first be deleted and then
+ * uploaded to the new destination location. This could have availability
+ * implications for your users.
+ *
- * @default false (when resource is deleted, files are deleted)
+ * @default true - when resource is deleted/updated, files are retained
*/
retainOnDelete?: boolean;
}
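For reference, the handler in index.py consumes these construct props as custom resource properties. The sketch below shows the expected shape only; the bucket names and keys are placeholders, and note that RetainOnDelete crosses CloudFormation as the strings "true"/"false" rather than a boolean:

# Shape of the ResourceProperties the handler expects (all values are placeholders).
resource_properties = {
    "SourceBucketName": "example-assets-bucket",
    "SourceObjectKey": "assets/website.zip",
    "DestinationBucketName": "example-website-bucket",
    "DestinationBucketKeyPrefix": "web/static",  # optional; "/" is treated as no prefix
    "RetainOnDelete": "false",                   # default is "true" after this change
}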