diff --git a/packages/@aws-cdk/aws-backup/README.md b/packages/@aws-cdk/aws-backup/README.md index 6eaae31c19aa2..5cf5bc4c1aa1a 100644 --- a/packages/@aws-cdk/aws-backup/README.md +++ b/packages/@aws-cdk/aws-backup/README.md @@ -32,7 +32,8 @@ const plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan'); Assigning resources to a plan can be done with `addSelection()`: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; const myTable = dynamodb.Table.fromTableName(this, 'Table', 'myTableName'); const myCoolConstruct = new Construct(this, 'MyCoolConstruct'); @@ -50,16 +51,17 @@ created for the selection. The `BackupSelection` implements `IGrantable`. To add rules to a plan, use `addRule()`: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(new backup.BackupPlanRule({ completionWindow: Duration.hours(2), startWindow: Duration.hours(1), scheduleExpression: events.Schedule.cron({ // Only cron expressions are supported day: '15', hour: '3', - minute: '30' + minute: '30', }), - moveToColdStorageAfter: Duration.days(30) + moveToColdStorageAfter: Duration.days(30), })); ``` @@ -69,7 +71,8 @@ If no value is specified, the retention period is set to 35 days which is the ma Property `moveToColdStorageAfter` must not be specified because PITR does not support this option. 
This example defines an AWS Backup rule with PITR and a retention period set to 14 days: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(new backup.BackupPlanRule({ enableContinuousBackup: true, deleteAfter: Duration.days(14), @@ -78,7 +81,8 @@ plan.addRule(new backup.BackupPlanRule({ Ready-made rules are also available: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(backup.BackupPlanRule.daily()); plan.addRule(backup.BackupPlanRule.weekly()); ``` @@ -152,7 +156,7 @@ const vault = new backup.BackupVault(this, 'Vault', { }, }), ], - }); + }), }) ``` @@ -166,8 +170,8 @@ new backup.BackupVault(this, 'Vault', { blockRecoveryPointDeletion: true, }); -const plan = backup.BackupPlan.dailyMonthly1YearRetention(this, 'Plan'); -plan.backupVault.blockRecoveryPointDeletion(); +declare const backupVault: backup.BackupVault; +backupVault.blockRecoveryPointDeletion(); ``` By default access is not restricted. diff --git a/packages/@aws-cdk/aws-backup/package.json b/packages/@aws-cdk/aws-backup/package.json index 0e1e552e15db4..a29e6be543645 100644 --- a/packages/@aws-cdk/aws-backup/package.json +++ b/packages/@aws-cdk/aws-backup/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture index cff23bb514119..5f28d8bba18e2 100644 --- a/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture +++ b/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture @@ -3,6 +3,8 @@ import { Duration, RemovalPolicy, Stack } from '@aws-cdk/core'; import { Construct } from 'constructs'; import * as backup from '@aws-cdk/aws-backup'; import * as iam from '@aws-cdk/aws-iam'; +import * as dynamodb from '@aws-cdk/aws-dynamodb'; +import * as events from 
'@aws-cdk/aws-events'; import * as kms from '@aws-cdk/aws-kms'; import * as sns from '@aws-cdk/aws-sns'; diff --git a/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture b/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture deleted file mode 100644 index 8dbfd6ac72c89..0000000000000 --- a/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture +++ /dev/null @@ -1,16 +0,0 @@ -// Fixture with packages imported, but nothing else -import { Duration, RemovalPolicy, Stack } from '@aws-cdk/core'; -import { Construct } from 'constructs'; -import * as backup from '@aws-cdk/aws-backup'; -import * as dynamodb from '@aws-cdk/aws-dynamodb'; -import * as events from '@aws-cdk/aws-events'; - -class Fixture extends Stack { - constructor(scope: Construct, id: string) { - super(scope, id); - - const plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan'); - - /// here - } -} diff --git a/packages/@aws-cdk/aws-certificatemanager/README.md b/packages/@aws-cdk/aws-certificatemanager/README.md index 331d40ad27276..0983dcc020c1d 100644 --- a/packages/@aws-cdk/aws-certificatemanager/README.md +++ b/packages/@aws-cdk/aws-certificatemanager/README.md @@ -40,9 +40,6 @@ If Amazon Route 53 is your DNS provider for the requested domain, the DNS record created automatically: ```ts -import * as acm from '@aws-cdk/aws-certificatemanager'; -import * as route53 from '@aws-cdk/aws-route53'; - const myHostedZone = new route53.HostedZone(this, 'HostedZone', { zoneName: 'example.com', }); @@ -106,6 +103,7 @@ The `DnsValidatedCertificate` construct exists to facilitate creating these cert Route53-based DNS validation. 
```ts +declare const myHostedZone: route53.HostedZone; new acm.DnsValidatedCertificate(this, 'CrossRegionCertificate', { domainName: 'hello.example.com', hostedZone: myHostedZone, @@ -120,10 +118,10 @@ AWS Certificate Manager can create [private certificates](https://docs.aws.amazo ```ts import * as acmpca from '@aws-cdk/aws-acmpca'; -new acm.PrivateCertificate(stack, 'PrivateCertificate', { +new acm.PrivateCertificate(this, 'PrivateCertificate', { domainName: 'test.example.com', subjectAlternativeNames: ['cool.example.com', 'test.example.net'], // optional - certificateAuthority: acmpca.CertificateAuthority.fromCertificateAuthorityArn(stack, 'CA', + certificateAuthority: acmpca.CertificateAuthority.fromCertificateAuthorityArn(this, 'CA', 'arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/023077d8-2bfa-4eb0-8f22-05c96deade77'), }); ``` @@ -134,7 +132,7 @@ If you want to import an existing certificate, you can do so from its ARN: ```ts const arn = 'arn:aws:...'; -const certificate = Certificate.fromCertificateArn(this, 'Certificate', arn); +const certificate = acm.Certificate.fromCertificateArn(this, 'Certificate', arn); ``` ## Sharing between Stacks @@ -152,8 +150,14 @@ An alarm can be created to determine whether a certificate is soon due for renewal ussing the following code: ```ts -const certificate = new Certificate(this, 'Certificate', { /* ... 
*/ }); -certificate.metricDaysToExpiry().createAlarm({ +import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; + +declare const myHostedZone: route53.HostedZone; +const certificate = new acm.Certificate(this, 'Certificate', { + domainName: 'hello.example.com', + validation: acm.CertificateValidation.fromDns(myHostedZone), +}); +certificate.metricDaysToExpiry().createAlarm(this, 'Alarm', { comparisonOperator: cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD, evaluationPeriods: 1, threshold: 45, // Automatic rotation happens between 60 and 45 days before expiry diff --git a/packages/@aws-cdk/aws-certificatemanager/package.json b/packages/@aws-cdk/aws-certificatemanager/package.json index 659b98eb834b6..16d0ad6b33ee5 100644 --- a/packages/@aws-cdk/aws-certificatemanager/package.json +++ b/packages/@aws-cdk/aws-certificatemanager/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-certificatemanager/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-certificatemanager/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..0a11d49d2511f --- /dev/null +++ b/packages/@aws-cdk/aws-certificatemanager/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as acm from '@aws-cdk/aws-certificatemanager'; +import * as route53 from '@aws-cdk/aws-route53'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-cloudfront-origins/README.md b/packages/@aws-cdk/aws-cloudfront-origins/README.md index cb7af64ff8618..b0de0e2ffdaf4 100644 --- a/packages/@aws-cdk/aws-cloudfront-origins/README.md +++ 
b/packages/@aws-cdk/aws-cloudfront-origins/README.md @@ -18,9 +18,6 @@ An S3 bucket can be added as an origin. If the bucket is configured as a website documents. ```ts -import * as cloudfront from '@aws-cdk/aws-cloudfront'; -import * as origins from '@aws-cdk/aws-cloudfront-origins'; - const myBucket = new s3.Bucket(this, 'myBucket'); new cloudfront.Distribution(this, 'myDist', { defaultBehavior: { origin: new origins.S3Origin(myBucket) }, @@ -38,9 +35,6 @@ URLs and not S3 URLs directly. Alternatively, a custom origin access identity ca You can configure CloudFront to add custom headers to the requests that it sends to your origin. These custom headers enable you to send and gather information from your origin that you don’t get with typical viewer requests. These headers can even be customized for each origin. CloudFront supports custom headers for both for custom and Amazon S3 origins. ```ts -import * as cloudfront from '@aws-cdk/aws-cloudfront'; -import * as origins from '@aws-cdk/aws-cloudfront-origins'; - const myBucket = new s3.Bucket(this, 'myBucket'); new cloudfront.Distribution(this, 'myDist', { defaultBehavior: { origin: new origins.S3Origin(myBucket, { @@ -60,12 +54,12 @@ accessible (`internetFacing` is true). Both Application and Network load balance import * as ec2 from '@aws-cdk/aws-ec2'; import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2'; -const vpc = new ec2.Vpc(...); +declare const vpc: ec2.Vpc; // Create an application load balancer in a VPC. 'internetFacing' must be 'true' // for CloudFront to access the load balancer and use it as an origin. const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { vpc, - internetFacing: true + internetFacing: true, }); new cloudfront.Distribution(this, 'myDist', { defaultBehavior: { origin: new origins.LoadBalancerV2Origin(lb) }, @@ -75,6 +69,9 @@ new cloudfront.Distribution(this, 'myDist', { The origin can also be customized to respond on different ports, have different connection properties, etc. 
```ts +import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2'; + +declare const loadBalancer: elbv2.ApplicationLoadBalancer; const origin = new origins.LoadBalancerV2Origin(loadBalancer, { connectionAttempts: 3, connectionTimeout: Duration.seconds(5), @@ -103,6 +100,7 @@ CloudFront automatically switches to the secondary origin. You achieve that behavior in the CDK using the `OriginGroup` class: ```ts +const myBucket = new s3.Bucket(this, 'myBucket'); new cloudfront.Distribution(this, 'myDist', { defaultBehavior: { origin: new origins.OriginGroup({ diff --git a/packages/@aws-cdk/aws-cloudfront-origins/package.json b/packages/@aws-cdk/aws-cloudfront-origins/package.json index 64298d1a43a83..ceb9bfe786459 100644 --- a/packages/@aws-cdk/aws-cloudfront-origins/package.json +++ b/packages/@aws-cdk/aws-cloudfront-origins/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-cloudfront-origins/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-cloudfront-origins/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..73800aee2e589 --- /dev/null +++ b/packages/@aws-cdk/aws-cloudfront-origins/rosetta/default.ts-fixture @@ -0,0 +1,15 @@ +// Fixture with packages imported, but nothing else +import { Duration, Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as cloudfront from '@aws-cdk/aws-cloudfront'; +import * as origins from '@aws-cdk/aws-cloudfront-origins'; +import * as s3 from '@aws-cdk/aws-s3'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + + } +} diff --git a/packages/@aws-cdk/aws-cloudtrail/README.md b/packages/@aws-cdk/aws-cloudtrail/README.md index 3deccd47545ea..46c5c0a95bd22 100644 --- a/packages/@aws-cdk/aws-cloudtrail/README.md +++ 
b/packages/@aws-cdk/aws-cloudtrail/README.md @@ -68,6 +68,8 @@ default retention setting. The following code enables sending CloudWatch logs bu period for the created Log Group. ```ts +import * as logs from '@aws-cdk/aws-logs'; + const trail = new cloudtrail.Trail(this, 'CloudTrail', { sendToCloudWatchLogs: true, cloudWatchLogsRetention: logs.RetentionDays.FOUR_MONTHS, @@ -88,18 +90,18 @@ The following code filters events for S3 from a specific AWS account and trigger ```ts const myFunctionHandler = new lambda.Function(this, 'MyFunction', { - code: lambda.Code.fromAsset('resource/myfunction'); + code: lambda.Code.fromAsset('resource/myfunction'), runtime: lambda.Runtime.NODEJS_12_X, handler: 'index.handler', }); -const eventRule = Trail.onEvent(this, 'MyCloudWatchEvent', { - target: new eventTargets.LambdaFunction(myFunctionHandler), +const eventRule = cloudtrail.Trail.onEvent(this, 'MyCloudWatchEvent', { + target: new targets.LambdaFunction(myFunctionHandler), }); eventRule.addEventPattern({ - account: '123456789012', - source: 'aws.s3', + account: ['123456789012'], + source: ['aws.s3'], }); ``` @@ -141,7 +143,7 @@ The following code configures the `Trail` to only track management events that a ```ts const trail = new cloudtrail.Trail(this, 'CloudTrail', { // ... - managementEvents: ReadWriteType.READ_ONLY, + managementEvents: cloudtrail.ReadWriteType.READ_ONLY, }); ``` @@ -157,13 +159,14 @@ be used to configure logging of S3 data events for specific buckets and specific configures logging of S3 data events for `fooBucket` and with object prefix `bar/`. 
```ts -import * as cloudtrail from '@aws-cdk/aws-cloudtrail'; +import * as s3 from '@aws-cdk/aws-s3'; const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail'); +declare const bucket: s3.Bucket; // Adds an event selector to the bucket foo trail.addS3EventSelector([{ - bucket: fooBucket, // 'fooBucket' is of type s3.IBucket + bucket, objectPrefix: 'bar/', }]); ``` @@ -174,12 +177,12 @@ configures logging of Lambda data events for a specific Function. ```ts const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail'); -const amazingFunction = new lambda.Function(stack, 'AnAmazingFunction', { +const amazingFunction = new lambda.Function(this, 'AnAmazingFunction', { runtime: lambda.Runtime.NODEJS_12_X, handler: "hello.handler", code: lambda.Code.fromAsset("lambda"), }); // Add an event selector to log data events for the provided Lambda functions. -trail.addLambdaEventSelector([ lambdaFunction ]); +trail.addLambdaEventSelector([ amazingFunction ]); ``` diff --git a/packages/@aws-cdk/aws-cloudtrail/package.json b/packages/@aws-cdk/aws-cloudtrail/package.json index 3a81482d9fd44..b6b6fbaa232f0 100644 --- a/packages/@aws-cdk/aws-cloudtrail/package.json +++ b/packages/@aws-cdk/aws-cloudtrail/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..b6440cd045f44 --- /dev/null +++ b/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as cloudtrail from '@aws-cdk/aws-cloudtrail'; +import * as sns from '@aws-cdk/aws-sns'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as 
targets from '@aws-cdk/aws-events-targets'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-config/README.md b/packages/@aws-cdk/aws-config/README.md index 0a8219a8c3f53..3a66934a43d69 100644 --- a/packages/@aws-cdk/aws-config/README.md +++ b/packages/@aws-cdk/aws-config/README.md @@ -59,16 +59,15 @@ For example, you could create a managed rule that checks whether active access k within the number of days specified. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/core'; - // https://docs.aws.amazon.com/config/latest/developerguide/access-keys-rotated.html new config.ManagedRule(this, 'AccessKeysRotated', { identifier: config.ManagedRuleIdentifiers.ACCESS_KEYS_ROTATED, inputParameters: { - maxAccessKeyAge: 60 // default is 90 days + maxAccessKeyAge: 60, // default is 90 days }, - maximumExecutionFrequency: config.MaximumExecutionFrequency.TWELVE_HOURS // default is 24 hours + + // default is 24 hours + maximumExecutionFrequency: config.MaximumExecutionFrequency.TWELVE_HOURS, }); ``` @@ -82,9 +81,6 @@ The following higher level constructs for AWS managed rules are available. Checks whether your active access keys are rotated within the number of days specified. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // compliant if access keys have been rotated within the last 90 days new config.AccessKeysRotated(this, 'AccessKeyRotated'); ``` @@ -95,12 +91,9 @@ Checks whether your CloudFormation stack's actual configuration differs, or has from it's expected configuration. 
```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // compliant if stack's status is 'IN_SYNC' // non-compliant if the stack's drift status is 'DRIFTED' -new config.CloudFormationStackDriftDetectionCheck(stack, 'Drift', { +new config.CloudFormationStackDriftDetectionCheck(this, 'Drift', { ownStackOnly: true, // checks only the stack containing the rule }); ``` @@ -110,17 +103,14 @@ new config.CloudFormationStackDriftDetectionCheck(stack, 'Drift', { Checks whether your CloudFormation stacks are sending event notifications to a SNS topic. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // topics to which CloudFormation stacks may send event notifications -const topic1 = new sns.Topic(stack, 'AllowedTopic1'); -const topic2 = new sns.Topic(stack, 'AllowedTopic2'); +const topic1 = new sns.Topic(this, 'AllowedTopic1'); +const topic2 = new sns.Topic(this, 'AllowedTopic2'); // non-compliant if CloudFormation stack does not send notifications to 'topic1' or 'topic2' new config.CloudFormationStackNotificationCheck(this, 'NotificationCheck', { topics: [topic1, topic2], -}) +}); ``` ### Custom rules @@ -140,13 +130,15 @@ To create a custom rule, define a `CustomRule` and specify the Lambda Function to run and the trigger types. 
```ts -import * as config from '@aws-cdk/aws-config'; +declare const evalComplianceFn: lambda.Function; new config.CustomRule(this, 'CustomRule', { lambdaFunction: evalComplianceFn, configurationChanges: true, periodic: true, - maximumExecutionFrequency: config.MaximumExecutionFrequency.SIX_HOURS, // default is 24 hours + + // default is 24 hours + maximumExecutionFrequency: config.MaximumExecutionFrequency.SIX_HOURS, }); ``` @@ -165,22 +157,21 @@ Use the `RuleScope` APIs (`fromResource()`, `fromResources()` or `fromTag()`) to the scope of both managed and custom rules: ```ts -import * as config from '@aws-cdk/aws-config'; - const sshRule = new config.ManagedRule(this, 'SSH', { identifier: config.ManagedRuleIdentifiers.EC2_SECURITY_GROUPS_INCOMING_SSH_DISABLED, ruleScope: config.RuleScope.fromResource(config.ResourceType.EC2_SECURITY_GROUP, 'sg-1234567890abcdefgh'), // restrict to specific security group }); +declare const evalComplianceFn: lambda.Function; const customRule = new config.CustomRule(this, 'Lambda', { lambdaFunction: evalComplianceFn, - configurationChanges: true + configurationChanges: true, ruleScope: config.RuleScope.fromResources([config.ResourceType.CLOUDFORMATION_STACK, config.ResourceType.S3_BUCKET]), // restrict to all CloudFormation stacks and S3 buckets }); const tagRule = new config.CustomRule(this, 'CostCenterTagRule', { lambdaFunction: evalComplianceFn, - configurationChanges: true + configurationChanges: true, ruleScope: config.RuleScope.fromTag('Cost Center', 'MyApp'), // restrict to a specific tag }); ``` @@ -194,10 +185,6 @@ Use the `onComplianceChange()` APIs to trigger an EventBridge event when a compl of your AWS Config Rule fails: ```ts -import * as config from '@aws-cdk/aws-config'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Topic to which compliance notification events will be published const complianceTopic = new sns.Topic(this, 'ComplianceTopic'); @@ -211,15 +198,13 
@@ Use the `onReEvaluationStatus()` status to trigger an EventBridge event when an rule is re-evaluated. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Topic to which re-evaluation notification events will be published const reEvaluationTopic = new sns.Topic(this, 'ComplianceTopic'); + +const rule = new config.CloudFormationStackDriftDetectionCheck(this, 'Drift'); rule.onReEvaluationStatus('ReEvaluationEvent', { target: new targets.SnsTopic(reEvaluationTopic), -}) +}); ``` ### Example @@ -228,11 +213,6 @@ The following example creates a custom rule that evaluates whether EC2 instances Compliance events are published to an SNS topic. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as lambda from '@aws-cdk/aws-lambda'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Lambda function containing logic that evaluates compliance with the rule. 
const evalComplianceFn = new lambda.Function(this, 'CustomFunction', { code: lambda.AssetCode.fromInline('exports.handler = (event) => console.log(event);'), @@ -244,7 +224,7 @@ const evalComplianceFn = new lambda.Function(this, 'CustomFunction', { const customRule = new config.CustomRule(this, 'Custom', { configurationChanges: true, lambdaFunction: evalComplianceFn, - ruleScope: config.RuleScope.fromResource([config.ResourceType.EC2_INSTANCE]), + ruleScope: config.RuleScope.fromResource(config.ResourceType.EC2_INSTANCE), }); // A rule to detect stack drifts diff --git a/packages/@aws-cdk/aws-config/package.json b/packages/@aws-cdk/aws-config/package.json index 3579910453b7a..0f2cc93d5f9ce 100644 --- a/packages/@aws-cdk/aws-config/package.json +++ b/packages/@aws-cdk/aws-config/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..f644a3f9c8157 --- /dev/null +++ b/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as config from '@aws-cdk/aws-config'; +import * as targets from '@aws-cdk/aws-events-targets'; +import * as sns from '@aws-cdk/aws-sns'; +import * as lambda from '@aws-cdk/aws-lambda'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-docdb/README.md b/packages/@aws-cdk/aws-docdb/README.md index 4791d99a138f7..6fcd235a513c8 100644 --- a/packages/@aws-cdk/aws-docdb/README.md +++ b/packages/@aws-cdk/aws-docdb/README.md @@ -18,17 +18,18 @@ always launch a 
database in a VPC. Use the `vpcSubnets` attribute to control whe your instances will be launched privately or publicly: ```ts -const cluster = new DatabaseCluster(this, 'Database', { - masterUser: { - username: 'myuser' // NOTE: 'admin' is reserved by DocumentDB - excludeCharacters: '\"@/:', // optional, defaults to the set "\"@/" and is also used for eventually created rotations - secretName: '/myapp/mydocdb/masteruser', // optional, if you prefer to specify the secret name - }, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE), - vpcSubnets: { - subnetType: ec2.SubnetType.PUBLIC, - }, - vpc +declare const vpc: ec2.Vpc; +const cluster = new docdb.DatabaseCluster(this, 'Database', { + masterUser: { + username: 'myuser', // NOTE: 'admin' is reserved by DocumentDB + excludeCharacters: '\"@/:', // optional, defaults to the set "\"@/" and is also used for eventually created rotations + secretName: '/myapp/mydocdb/masteruser', // optional, if you prefer to specify the secret name + }, + instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE), + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + vpc, }); ``` @@ -42,6 +43,7 @@ To control who can access the cluster, use the `.connections` attribute. Documen you don't need to specify the port: ```ts +declare const cluster: docdb.DatabaseCluster; cluster.connections.allowDefaultPortFromAnyIpv4('Open to the world'); ``` @@ -49,6 +51,7 @@ The endpoints to access your database cluster will be available as the `.cluster attributes: ```ts +declare const cluster: docdb.DatabaseCluster; const writeAddress = cluster.clusterEndpoint.socketAddress; // "HOSTNAME:PORT" ``` @@ -56,7 +59,10 @@ If you have existing security groups you would like to add to the cluster, use t groups added in this way will not be managed by the `Connections` object of the cluster. 
```ts -const securityGroup = new ec2.SecurityGroup(stack, 'SecurityGroup', { +declare const vpc: ec2.Vpc; +declare const cluster: docdb.DatabaseCluster; + +const securityGroup = new ec2.SecurityGroup(this, 'SecurityGroup', { vpc, }); cluster.addSecurityGroups(securityGroup); @@ -67,16 +73,17 @@ cluster.addSecurityGroups(securityGroup); Deletion protection can be enabled on an Amazon DocumentDB cluster to prevent accidental deletion of the cluster: ```ts -const cluster = new DatabaseCluster(this, 'Database', { - masterUser: { - username: 'myuser' - }, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE), - vpcSubnets: { - subnetType: ec2.SubnetType.PUBLIC, - }, - vpc, - deletionProtection: true // Enable deletion protection. +declare const vpc: ec2.Vpc; +const cluster = new docdb.DatabaseCluster(this, 'Database', { + masterUser: { + username: 'myuser', + }, + instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE), + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + vpc, + deletionProtection: true, // Enable deletion protection. 
}); ``` @@ -85,6 +92,7 @@ const cluster = new DatabaseCluster(this, 'Database', { When the master password is generated and stored in AWS Secrets Manager, it can be rotated automatically: ```ts +declare const cluster: docdb.DatabaseCluster; cluster.addRotationSingleUser(); // Will rotate automatically after 30 days ``` @@ -93,22 +101,28 @@ cluster.addRotationSingleUser(); // Will rotate automatically after 30 days The multi user rotation scheme is also available: ```ts +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; + +declare const myImportedSecret: secretsmanager.Secret; +declare const cluster: docdb.DatabaseCluster; + cluster.addRotationMultiUser('MyUser', { - secret: myImportedSecret // This secret must have the `masterarn` key + secret: myImportedSecret, // This secret must have the `masterarn` key }); ``` It's also possible to create user credentials together with the cluster and add rotation: ```ts +declare const cluster: docdb.DatabaseCluster; const myUserSecret = new docdb.DatabaseSecret(this, 'MyUserSecret', { username: 'myuser', - masterSecret: cluster.secret + masterSecret: cluster.secret, }); const myUserSecretAttached = myUserSecret.attach(cluster); // Adds DB connections information in the secret cluster.addRotationMultiUser('MyUser', { // Add rotation using the multi user scheme - secret: myUserSecretAttached // This secret must have the `masterarn` key + secret: myUserSecretAttached, // This secret must have the `masterarn` key }); ``` @@ -126,8 +140,21 @@ Sending audit or profiler needs to be configured in two places: 2. 
Enable the corresponding option(s) when creating the `DatabaseCluster`: ```ts -const cluster = new DatabaseCluster(this, 'Database', { - ..., +import * as iam from '@aws-cdk/aws-iam'; +import * as logs from '@aws-cdk/aws-logs'; + +declare const myLogsPublishingRole: iam.Role; +declare const vpc: ec2.Vpc; + +const cluster = new docdb.DatabaseCluster(this, 'Database', { + masterUser: { + username: 'myuser', + }, + instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE), + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + vpc, exportProfilerLogsToCloudWatch: true, // Enable sending profiler logs exportAuditLogsToCloudWatch: true, // Enable sending audit logs cloudWatchLogsRetention: logs.RetentionDays.THREE_MONTHS, // Optional - default is to never expire logs diff --git a/packages/@aws-cdk/aws-docdb/package.json b/packages/@aws-cdk/aws-docdb/package.json index 5e145b8fa06f7..8f4b043c9dbf4 100644 --- a/packages/@aws-cdk/aws-docdb/package.json +++ b/packages/@aws-cdk/aws-docdb/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-docdb/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-docdb/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..f5b4d71caa2a5 --- /dev/null +++ b/packages/@aws-cdk/aws-docdb/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as docdb from '@aws-cdk/aws-docdb'; +import * as ec2 from '@aws-cdk/aws-ec2'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-msk/README.md b/packages/@aws-cdk/aws-msk/README.md index 664ec4f66c973..36c85154c8d04 100644 ---
a/packages/@aws-cdk/aws-msk/README.md +++ b/packages/@aws-cdk/aws-msk/README.md @@ -26,9 +26,9 @@ The following example creates an MSK Cluster. ```ts -import * as msk from '@aws-cdk/aws-msk'; - -const cluster = new Cluster(this, 'Cluster', { +declare const vpc: ec2.Vpc; +const cluster = new msk.Cluster(this, 'Cluster', { + clusterName: 'myCluster', kafkaVersion: msk.KafkaVersion.V2_8_1, vpc, }); @@ -38,40 +38,45 @@ const cluster = new Cluster(this, 'Cluster', { To control who can access the Cluster, use the `.connections` attribute. For a list of ports used by MSK, refer to the [MSK documentation](https://docs.aws.amazon.com/msk/latest/developerguide/client-access.html#port-info). -```typescript -import * as msk from "@aws-cdk/aws-msk" -import * as ec2 from "@aws-cdk/aws-ec2" - -const cluster = new msk.Cluster(this, "Cluster", {...}) +```ts +declare const vpc: ec2.Vpc; +const cluster = new msk.Cluster(this, 'Cluster', { + clusterName: 'myCluster', + kafkaVersion: msk.KafkaVersion.V2_8_1, + vpc, +}); cluster.connections.allowFrom( - ec2.Peer.ipv4("1.2.3.4/8"), - ec2.Port.tcp(2181) -) + ec2.Peer.ipv4('1.2.3.4/8'), + ec2.Port.tcp(2181), +); cluster.connections.allowFrom( - ec2.Peer.ipv4("1.2.3.4/8"), - ec2.Port.tcp(9094) -) + ec2.Peer.ipv4('1.2.3.4/8'), + ec2.Port.tcp(9094), +); ``` ## Cluster Endpoints You can use the following attributes to get a list of the Kafka broker or ZooKeeper node endpoints -```typescript -new cdk.CfnOutput(this, 'BootstrapBrokers', { value: cluster.bootstrapBrokers }); -new cdk.CfnOutput(this, 'BootstrapBrokersTls', { value: cluster.bootstrapBrokersTls }); -new cdk.CfnOutput(this, 'BootstrapBrokersSaslScram', { value: cluster.bootstrapBrokersSaslScram }); -new cdk.CfnOutput(this, 'ZookeeperConnection', { value: cluster.zookeeperConnectionString }); -new cdk.CfnOutput(this, 'ZookeeperConnectionTls', { value: cluster.zookeeperConnectionStringTls }); +```ts +declare const cluster: msk.Cluster; +new CfnOutput(this, 'BootstrapBrokers', { value: 
cluster.bootstrapBrokers }); +new CfnOutput(this, 'BootstrapBrokersTls', { value: cluster.bootstrapBrokersTls }); +new CfnOutput(this, 'BootstrapBrokersSaslScram', { value: cluster.bootstrapBrokersSaslScram }); +new CfnOutput(this, 'ZookeeperConnection', { value: cluster.zookeeperConnectionString }); +new CfnOutput(this, 'ZookeeperConnectionTls', { value: cluster.zookeeperConnectionStringTls }); ``` ## Importing an existing Cluster To import an existing MSK cluster into your CDK app use the `.fromClusterArn()` method. -```typescript -const cluster = msk.Cluster.fromClusterArn(this, 'Cluster', 'arn:aws:kafka:us-west-2:1234567890:cluster/a-cluster/11111111-1111-1111-1111-111111111111-1') +```ts +const cluster = msk.Cluster.fromClusterArn(this, 'Cluster', + 'arn:aws:kafka:us-west-2:1234567890:cluster/a-cluster/11111111-1111-1111-1111-111111111111-1', +); ``` ## Client Authentication @@ -84,25 +89,26 @@ const cluster = msk.Cluster.fromClusterArn(this, 'Cluster', 'arn:aws:kafka:us-we To enable client authentication with TLS set the `certificateAuthorityArns` property to reference your ACM Private CA. [More info on Private CAs.](https://docs.aws.amazon.com/msk/latest/developerguide/msk-authentication.html) -```typescript -import * as msk from "@aws-cdk/aws-msk" -import * as acmpca from "@aws-cdk/aws-acmpca" +```ts +import * as acmpca from '@aws-cdk/aws-acmpca'; +declare const vpc: ec2.Vpc; const cluster = new msk.Cluster(this, 'Cluster', { - ... 
- encryptionInTransit: { - clientBroker: msk.ClientBrokerEncryption.TLS, - }, - clientAuthentication: msk.ClientAuthentication.tls({ - certificateAuthorities: [ - acmpca.CertificateAuthority.fromCertificateAuthorityArn( - stack, - "CertificateAuthority", - "arn:aws:acm-pca:us-west-2:1234567890:certificate-authority/11111111-1111-1111-1111-111111111111" - ), - ], - }), - }); + clusterName: 'myCluster', + kafkaVersion: msk.KafkaVersion.V2_8_1, + vpc, + encryptionInTransit: { + clientBroker: msk.ClientBrokerEncryption.TLS, + }, + clientAuthentication: msk.ClientAuthentication.tls({ + certificateAuthorities: [ + acmpca.CertificateAuthority.fromCertificateAuthorityArn( + this, + 'CertificateAuthority', + 'arn:aws:acm-pca:us-west-2:1234567890:certificate-authority/11111111-1111-1111-1111-111111111111', + ), + ], + }), }); ``` @@ -110,34 +116,36 @@ const cluster = new msk.Cluster(this, 'Cluster', { Enable client authentication with [SASL/SCRAM](https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html): -```typescript -import * as msk from "@aws-cdk/aws-msk" - -const cluster = new msk.cluster(this, "cluster", { - ... +```ts +declare const vpc: ec2.Vpc; +const cluster = new msk.Cluster(this, 'cluster', { + clusterName: 'myCluster', + kafkaVersion: msk.KafkaVersion.V2_8_1, + vpc, encryptionInTransit: { clientBroker: msk.ClientBrokerEncryption.TLS, }, clientAuthentication: msk.ClientAuthentication.sasl({ scram: true, }), -}) +}); ``` ### SASL/IAM Enable client authentication with [IAM](https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html): -```typescript -import * as msk from "@aws-cdk/aws-msk" - -const cluster = new msk.cluster(this, "cluster", { - ... 
+```ts +declare const vpc: ec2.Vpc; +const cluster = new msk.Cluster(this, 'cluster', { + clusterName: 'myCluster', + kafkaVersion: msk.KafkaVersion.V2_8_1, + vpc, encryptionInTransit: { clientBroker: msk.ClientBrokerEncryption.TLS, }, clientAuthentication: msk.ClientAuthentication.sasl({ iam: true, }), -}) +}); ``` diff --git a/packages/@aws-cdk/aws-msk/package.json b/packages/@aws-cdk/aws-msk/package.json index 70978b758f5e0..79fb57bbea781 100644 --- a/packages/@aws-cdk/aws-msk/package.json +++ b/packages/@aws-cdk/aws-msk/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-msk/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-msk/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..e6009423c7553 --- /dev/null +++ b/packages/@aws-cdk/aws-msk/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { CfnOutput, Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as msk from '@aws-cdk/aws-msk'; +import * as ec2 from '@aws-cdk/aws-ec2'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-s3-assets/README.md b/packages/@aws-cdk/aws-s3-assets/README.md index a73cbf0919642..bf005eba020d4 100644 --- a/packages/@aws-cdk/aws-s3-assets/README.md +++ b/packages/@aws-cdk/aws-s3-assets/README.md @@ -95,18 +95,21 @@ method `tryBundle()` which should return `true` if local bundling was performed. 
If `false` is returned, docker bundling will be done: ```ts +class MyBundle implements ILocalBundling { + public tryBundle(outputDir: string, options: BundlingOptions) { + const canRunLocally = true // replace with actual logic + if (canRunLocally) { + // perform local bundling here + return true; + } + return false; + } +} + new assets.Asset(this, 'BundledAsset', { path: '/path/to/asset', bundling: { - local: { - tryBundle(outputDir: string, options: BundlingOptions) { - if (canRunLocally) { - // perform local bundling here - return true; - } - return false; - }, - }, + local: new MyBundle(), // Docker bundling fallback image: DockerImage.fromRegistry('alpine'), entrypoint: ['/bin/sh', '-c'], diff --git a/packages/@aws-cdk/aws-s3-assets/lib/asset.ts b/packages/@aws-cdk/aws-s3-assets/lib/asset.ts index 484e04e4a9cb2..2f04f4532b36e 100644 --- a/packages/@aws-cdk/aws-s3-assets/lib/asset.ts +++ b/packages/@aws-cdk/aws-s3-assets/lib/asset.ts @@ -76,13 +76,13 @@ export class Asset extends CoreConstruct implements cdk.IAsset { /** * Attribute which represents the S3 HTTP URL of this asset. - * @example https://s3.us-west-1.amazonaws.com/bucket/key + * For example, `https://s3.us-west-1.amazonaws.com/bucket/key` */ public readonly httpUrl: string; /** * Attribute which represents the S3 URL of this asset. 
- * @example s3://bucket/key + * For example, `s3://bucket/key` */ public readonly s3ObjectUrl: string; diff --git a/packages/@aws-cdk/aws-s3-assets/package.json b/packages/@aws-cdk/aws-s3-assets/package.json index 084b0968e7306..45163b32641f7 100644 --- a/packages/@aws-cdk/aws-s3-assets/package.json +++ b/packages/@aws-cdk/aws-s3-assets/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..52f4c907d8b07 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { Construct } from 'constructs'; +import { BundlingOptions, BundlingOutput, DockerImage, ILocalBundling, Stack } from '@aws-cdk/core'; +import * as assets from '@aws-cdk/aws-s3-assets'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-s3-deployment/README.md b/packages/@aws-cdk/aws-s3-deployment/README.md index 37a571b075f40..f9bff70495c1d 100644 --- a/packages/@aws-cdk/aws-s3-deployment/README.md +++ b/packages/@aws-cdk/aws-s3-deployment/README.md @@ -20,13 +20,13 @@ enabled and populates it from a local directory on disk. 
```ts const websiteBucket = new s3.Bucket(this, 'WebsiteBucket', { websiteIndexDocument: 'index.html', - publicReadAccess: true + publicReadAccess: true, }); new s3deploy.BucketDeployment(this, 'DeployWebsite', { sources: [s3deploy.Source.asset('./website-dist')], destinationBucket: websiteBucket, - destinationKeyPrefix: 'web/static' // optional prefix in destination bucket + destinationKeyPrefix: 'web/static', // optional prefix in destination bucket }); ``` @@ -110,6 +110,7 @@ when the `BucketDeployment` resource is created or updated. You can use the opti this behavior, in which case the files will not be deleted. ```ts +declare const destinationBucket: s3.Bucket; new s3deploy.BucketDeployment(this, 'DeployMeWithoutDeletingFilesOnDestination', { sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], destinationBucket, @@ -122,17 +123,18 @@ each with its own characteristics. For example, you can set different cache-cont based on file extensions: ```ts -new BucketDeployment(this, 'BucketDeployment', { - sources: [Source.asset('./website', { exclude: ['index.html'] })], - destinationBucket: bucket, - cacheControl: [CacheControl.fromString('max-age=31536000,public,immutable')], +declare const destinationBucket: s3.Bucket; +new s3deploy.BucketDeployment(this, 'BucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['index.html'] })], + destinationBucket, + cacheControl: [s3deploy.CacheControl.fromString('max-age=31536000,public,immutable')], prune: false, }); -new BucketDeployment(this, 'HTMLBucketDeployment', { - sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })], - destinationBucket: bucket, - cacheControl: [CacheControl.fromString('max-age=0,no-cache,no-store,must-revalidate')], +new s3deploy.BucketDeployment(this, 'HTMLBucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['*', '!index.html'] })], + destinationBucket, + cacheControl: 
[s3deploy.CacheControl.fromString('max-age=0,no-cache,no-store,must-revalidate')], prune: false, }); ``` @@ -142,19 +144,21 @@ new BucketDeployment(this, 'HTMLBucketDeployment', { There are two points at which filters are evaluated in a deployment: asset bundling and the actual deployment. If you simply want to exclude files in the asset bundling process, you should leverage the `exclude` property of `AssetOptions` when defining your source: ```ts -new BucketDeployment(this, 'HTMLBucketDeployment', { - sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })], - destinationBucket: bucket, +declare const destinationBucket: s3.Bucket; +new s3deploy.BucketDeployment(this, 'HTMLBucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['*', '!index.html'] })], + destinationBucket, }); ``` If you want to specify filters to be used in the deployment process, you can use the `exclude` and `include` filters on `BucketDeployment`. If excluded, these files will not be deployed to the destination bucket. 
In addition, if the file already exists in the destination bucket, it will not be deleted if you are using the `prune` option: ```ts +declare const destinationBucket: s3.Bucket; new s3deploy.BucketDeployment(this, 'DeployButExcludeSpecificFiles', { sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], destinationBucket, - exclude: ['*.txt'] + exclude: ['*.txt'], }); ``` @@ -189,7 +193,7 @@ and [`aws s3 sync` documentation](https://docs.aws.amazon.com/cli/latest/referen ```ts const websiteBucket = new s3.Bucket(this, 'WebsiteBucket', { websiteIndexDocument: 'index.html', - publicReadAccess: true + publicReadAccess: true, }); new s3deploy.BucketDeployment(this, 'DeployWebsite', { @@ -201,9 +205,12 @@ new s3deploy.BucketDeployment(this, 'DeployWebsite', { // system-defined metadata contentType: "text/html", contentLanguage: "en", - storageClass: StorageClass.INTELLIGENT_TIERING, - serverSideEncryption: ServerSideEncryption.AES_256, - cacheControl: [CacheControl.setPublic(), CacheControl.maxAge(cdk.Duration.hours(1))], + storageClass: s3deploy.StorageClass.INTELLIGENT_TIERING, + serverSideEncryption: s3deploy.ServerSideEncryption.AES_256, + cacheControl: [ + s3deploy.CacheControl.setPublic(), + s3deploy.CacheControl.maxAge(Duration.hours(1)), + ], accessControl: s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL, }); ``` @@ -250,13 +257,16 @@ Please note that creating VPC inline may cause stack deletion failures. It is sh To avoid such condition, keep your network infra (VPC) in a separate stack and pass as props. 
```ts +declare const destinationBucket: s3.Bucket; +declare const vpc: ec2.Vpc; + new s3deploy.BucketDeployment(this, 'DeployMeWithEfsStorage', { - sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], - destinationBucket, - destinationKeyPrefix: 'efs/', - useEfs: true, - vpc: new ec2.Vpc(this, 'Vpc'), - retainOnDelete: false, + sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], + destinationBucket, + destinationKeyPrefix: 'efs/', + useEfs: true, + vpc, + retainOnDelete: false, }); ``` diff --git a/packages/@aws-cdk/aws-s3-deployment/package.json b/packages/@aws-cdk/aws-s3-deployment/package.json index df08ee72901ab..2f5fd80f65066 100644 --- a/packages/@aws-cdk/aws-s3-deployment/package.json +++ b/packages/@aws-cdk/aws-s3-deployment/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..75a435a142566 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture @@ -0,0 +1,15 @@ +// Fixture with packages imported, but nothing else +import { Duration, Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as s3deploy from '@aws-cdk/aws-s3-deployment'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as path from 'path'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-s3-notifications/README.md b/packages/@aws-cdk/aws-s3-notifications/README.md index f054708f437fb..0b57126001cf8 100644 --- a/packages/@aws-cdk/aws-s3-notifications/README.md +++ b/packages/@aws-cdk/aws-s3-notifications/README.md @@ -18,10 +18,10 @@ The
following example shows how to send a notification to an SNS topic when an object is created in an S3 bucket: ```ts -import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as sns from '@aws-cdk/aws-sns'; -const bucket = new s3.Bucket(stack, 'Bucket'); -const topic = new sns.Topic(stack, 'Topic'); +const bucket = new s3.Bucket(this, 'Bucket'); +const topic = new sns.Topic(this, 'Topic'); bucket.addEventNotification(s3.EventType.OBJECT_CREATED_PUT, new s3n.SnsDestination(topic)); ``` @@ -29,13 +29,13 @@ bucket.addEventNotification(s3.EventType.OBJECT_CREATED_PUT, new s3n.SnsDestinat The following example shows how to send a notification to a Lambda function when an object is created in an S3 bucket: ```ts -import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as lambda from '@aws-cdk/aws-lambda'; -const bucket = new s3.Bucket(stack, 'Bucket'); -const fn = new Function(this, 'MyFunction', { - runtime: Runtime.NODEJS_12_X, +const bucket = new s3.Bucket(this, 'Bucket'); +const fn = new lambda.Function(this, 'MyFunction', { + runtime: lambda.Runtime.NODEJS_12_X, handler: 'index.handler', - code: Code.fromAsset(path.join(__dirname, 'lambda-handler')), + code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler')), }); bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.LambdaDestination(fn)); diff --git a/packages/@aws-cdk/aws-s3-notifications/package.json b/packages/@aws-cdk/aws-s3-notifications/package.json index 7c04d633bee5d..ccc6d603d1f5a 100644 --- a/packages/@aws-cdk/aws-s3-notifications/package.json +++ b/packages/@aws-cdk/aws-s3-notifications/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture new file mode 100644 index 
0000000000000..36e2218e03d06 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as path from 'path'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-servicecatalogappregistry/README.md b/packages/@aws-cdk/aws-servicecatalogappregistry/README.md index e816724f30d3c..6fcee1d94bf9c 100644 --- a/packages/@aws-cdk/aws-servicecatalogappregistry/README.md +++ b/packages/@aws-cdk/aws-servicecatalogappregistry/README.md @@ -54,8 +54,11 @@ An application that has been created outside of the stack can be imported into y Applications can be imported by their ARN via the `Application.fromApplicationArn()` API: ```ts -const importedApplication = appreg.Application.fromApplicationArn(this, 'MyImportedApplication', - 'arn:aws:servicecatalog:us-east-1:012345678910:/applications/0aqmvxvgmry0ecc4mjhwypun6i'); +const importedApplication = appreg.Application.fromApplicationArn( + this, + 'MyImportedApplication', + 'arn:aws:servicecatalog:us-east-1:012345678910:/applications/0aqmvxvgmry0ecc4mjhwypun6i', +); ``` ## Attribute Group @@ -84,8 +87,11 @@ An attribute group that has been created outside of the stack can be imported in Attribute groups can be imported by their ARN via the `AttributeGroup.fromAttributeGroupArn()` API: ```ts -const importedAttributeGroup = appreg.AttributeGroup.fromAttributeGroupArn(this, 'MyImportedAttrGroup', - 'arn:aws:servicecatalog:us-east-1:012345678910:/attribute-groups/0aqmvxvgmry0ecc4mjhwypun6i'); +const importedAttributeGroup = appreg.AttributeGroup.fromAttributeGroupArn( + this, + 'MyImportedAttrGroup', + 
'arn:aws:servicecatalog:us-east-1:012345678910:/attribute-groups/0aqmvxvgmry0ecc4mjhwypun6i', +); ``` ## Associations @@ -101,7 +107,9 @@ CDK will fail at deploy time. You can associate an attribute group with an application with the `associateAttributeGroup()` API: -```ts basic-constructs +```ts +declare const application: appreg.Application; +declare const attributeGroup: appreg.AttributeGroup; application.associateAttributeGroup(attributeGroup); ``` @@ -109,8 +117,10 @@ application.associateAttributeGroup(attributeGroup); You can associate a stack with an application with the `associateStack()` API: -```ts basic-constructs -const myStack = new cdk.Stack(app, 'MyStack'); +```ts +const app = new App(); +const myStack = new Stack(app, 'MyStack'); +declare const application: appreg.Application; application.associateStack(myStack); ``` diff --git a/packages/@aws-cdk/aws-servicecatalogappregistry/package.json b/packages/@aws-cdk/aws-servicecatalogappregistry/package.json index 0da24cca81ef4..8e1de88eac87d 100644 --- a/packages/@aws-cdk/aws-servicecatalogappregistry/package.json +++ b/packages/@aws-cdk/aws-servicecatalogappregistry/package.json @@ -7,6 +7,13 @@ "jsii": { "outdir": "dist", "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + }, "targets": { "dotnet": { "namespace": "Amazon.CDK.AWS.ServiceCatalogAppRegistry", diff --git a/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/basic-constructs.ts-fixture b/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/basic-constructs.ts-fixture deleted file mode 100644 index 19ffd84abf486..0000000000000 --- a/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/basic-constructs.ts-fixture +++ /dev/null @@ -1,22 +0,0 @@ -// Fixture with packages imported, but nothing else -import * as cdk from '@aws-cdk/core'; -import * as appreg from '@aws-cdk/aws-servicecatalogappregistry'; - -class Fixture extends cdk.Stack { - constructor(scope: Construct, id: string) { - 
super(scope, id); - - const application = new appreg.Application(stack, 'MyApplication', { - applicationName: 'MyApplication', - }); - - const attributeGroup = new appreg.AttributeGroup(stack, 'MyAttributeGroup', { - attributeGroupName: 'testAttributeGroup', - attributes: { - key: 'value', - }, - }); - - /// here - } -} diff --git a/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/default.ts-fixture index 61dfd75923e1e..45174ea8d63ed 100644 --- a/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/default.ts-fixture +++ b/packages/@aws-cdk/aws-servicecatalogappregistry/rosetta/default.ts-fixture @@ -1,8 +1,9 @@ // Fixture with packages imported, but nothing else -import * as cdk from '@aws-cdk/core'; +import { App, Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; import * as appreg from '@aws-cdk/aws-servicecatalogappregistry'; -class Fixture extends cdk.Stack { +class Fixture extends Stack { constructor(scope: Construct, id: string) { super(scope, id); diff --git a/packages/@aws-cdk/aws-ses/README.md b/packages/@aws-cdk/aws-ses/README.md index b2d36c9785175..04fc9f2e26202 100644 --- a/packages/@aws-cdk/aws-ses/README.md +++ b/packages/@aws-cdk/aws-ses/README.md @@ -20,56 +20,58 @@ Create a receipt rule set with rules and actions (actions can be found in the ```ts import * as s3 from '@aws-cdk/aws-s3'; -import * as ses from '@aws-cdk/aws-ses'; import * as actions from '@aws-cdk/aws-ses-actions'; -import * as sns from '@aws-cdk/aws-sns'; -const bucket = new s3.Bucket(stack, 'Bucket'); -const topic = new sns.Topic(stack, 'Topic'); +const bucket = new s3.Bucket(this, 'Bucket'); +const topic = new sns.Topic(this, 'Topic'); -new ses.ReceiptRuleSet(stack, 'RuleSet', { +new ses.ReceiptRuleSet(this, 'RuleSet', { rules: [ { recipients: ['hello@aws.com'], actions: [ new actions.AddHeader({ name: 'X-Special-Header', - value: 'aws' + value: 'aws', }), new actions.S3({ 
bucket, objectKeyPrefix: 'emails/', - topic - }) + topic, + }), ], }, { recipients: ['aws.com'], actions: [ new actions.Sns({ - topic - }) - ] - } - ] + topic, + }), + ], + }, + ], }); ``` Alternatively, rules can be added to a rule set: ```ts -const ruleSet = new ses.ReceiptRuleSet(this, 'RuleSet'): +const ruleSet = new ses.ReceiptRuleSet(this, 'RuleSet'); const awsRule = ruleSet.addRule('Aws', { - recipients: ['aws.com'] + recipients: ['aws.com'], }); ``` And actions to rules: ```ts +import * as actions from '@aws-cdk/aws-ses-actions'; + +declare const awsRule: ses.ReceiptRule; +declare const topic: sns.Topic; awsRule.addAction(new actions.Sns({ - topic + topic, })); ``` @@ -81,7 +83,7 @@ A rule to drop spam can be added by setting `dropSpam` to `true`: ```ts new ses.ReceiptRuleSet(this, 'RuleSet', { - dropSpam: true + dropSpam: true, }); ``` @@ -94,8 +96,8 @@ Create a receipt filter: ```ts new ses.ReceiptFilter(this, 'Filter', { - ip: '1.2.3.4/16' // Will be blocked -}) + ip: '1.2.3.4/16', // Will be blocked +}); ``` An allow list filter is also available: @@ -105,7 +107,7 @@ new ses.AllowListReceiptFilter(this, 'AllowList', { ips: [ '10.0.0.0/16', '1.2.3.4/16', - ] + ], }); ``` diff --git a/packages/@aws-cdk/aws-ses/package.json b/packages/@aws-cdk/aws-ses/package.json index 611d336d3dbfa..1ef321d62b129 100644 --- a/packages/@aws-cdk/aws-ses/package.json +++ b/packages/@aws-cdk/aws-ses/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-ses/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-ses/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..f86757d5fe6f4 --- /dev/null +++ b/packages/@aws-cdk/aws-ses/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } 
from 'constructs'; +import * as ses from '@aws-cdk/aws-ses'; +import * as sns from '@aws-cdk/aws-sns'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts index 9bf124c31c71d..4d98b3a29bb32 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts +++ b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts @@ -1,4 +1,37 @@ +/** + * Information needed to access an IAM role created + * as part of the bootstrap process + */ +export interface BootstrapRole { + /** + * The ARN of the IAM role created as part of bootstrapping + * e.g. lookupRoleArn + */ + readonly arn: string; + + /** + * External ID to use when assuming the bootstrap role + * + * @default - No external ID + */ + readonly assumeRoleExternalId?: string; + + /** + * Version of bootstrap stack required to use this role + * + * @default - No bootstrap stack required + */ + readonly requiresBootstrapStackVersion?: number; + + /** + * Name of SSM parameter with bootstrap stack version + * + * @default - Discover SSM parameter by reading stack + */ + readonly bootstrapStackVersionSsmParameter?: string; +} + /** * Artifact properties for CloudFormation stacks.
 */ @@ -56,6 +89,13 @@ export interface AwsCloudFormationStackProperties { */ readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target AWS account + * + * @default - No role is assumed (current credentials are used) + */ + readonly lookupRole?: BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json index 9241ae62ef0ff..5fe1f4fb4321a 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json +++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json @@ -307,6 +307,10 @@ "description": "The role that is passed to CloudFormation to execute the change set (Default - No role is passed (currently assumed role/credentials are used))", "type": "string" }, + "lookupRole": { + "description": "The role to use to look up values from the target AWS account (Default - No role is assumed (current credentials are used))", + "$ref": "#/definitions/BootstrapRole" + }, "stackTemplateAssetObjectUrl": { "description": "If the stack template has already been included in the asset manifest, its asset URL (Default - Not uploaded yet, upload just before deploying)", "type": "string" @@ -328,6 +332,31 @@ "templateFile" ] }, + "BootstrapRole": { + "description": "Information needed to access an IAM role created\nas part of the bootstrap process", + "type": "object", + "properties": { + "arn": { + "description": "The ARN of the IAM role created as part of bootstrapping\ne.g.
 lookupRoleArn", + "type": "string" + }, + "assumeRoleExternalId": { + "description": "External ID to use when assuming the bootstrap role (Default - No external ID)", + "type": "string" + }, + "requiresBootstrapStackVersion": { + "description": "Version of bootstrap stack required to use this role (Default - No bootstrap stack required)", + "type": "number" + }, + "bootstrapStackVersionSsmParameter": { + "description": "Name of SSM parameter with bootstrap stack version (Default - Discover SSM parameter by reading stack)", + "type": "string" + } + }, + "required": [ + "arn" + ] + }, "AssetManifestProperties": { "description": "Artifact properties for the Asset Manifest", "type": "object", @@ -598,7 +627,7 @@ } }, "returnAsymmetricSubnets": { - "description": "Whether to populate the subnetGroups field of the {@link VpcContextResponse},\nwhich contains potentially asymmetric subnet groups.", + "description": "Whether to populate the subnetGroups field of the {@link VpcContextResponse},\nwhich contains potentially asymmetric subnet groups.", "default": false, "type": "boolean" }, diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json index 01d4f111912e9..5bdbc9d33c3b3 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json +++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json @@ -1 +1 @@ -{"version":"15.0.0"} \ No newline at end of file +{"version":"16.0.0"} \ No newline at end of file diff --git a/packages/@aws-cdk/core/lib/annotations.ts b/packages/@aws-cdk/core/lib/annotations.ts index f46c830c25757..03fb7a99bd80a 100644 --- a/packages/@aws-cdk/core/lib/annotations.ts +++ b/packages/@aws-cdk/core/lib/annotations.ts @@ -44,7 +44,7 @@ export class Annotations { /** * Adds an { "error": } metadata entry to this construct. - * The toolkit will fail synthesis when errors are reported.
+ * The toolkit will fail deployment of any stack that has errors reported against it. * @param message The error message. */ public addError(message: string) { diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts index d8e1f8818abc4..ed537e496128a 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts @@ -21,6 +21,12 @@ export const BOOTSTRAP_QUALIFIER_CONTEXT = '@aws-cdk/core:bootstrapQualifier'; */ const MIN_BOOTSTRAP_STACK_VERSION = 6; +/** + * The minimum bootstrap stack version required + * to use the lookup role. + */ +const MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION = 8; + /** * Configuration properties for DefaultStackSynthesizer */ @@ -91,6 +97,25 @@ export interface DefaultStackSynthesizerProps { */ readonly lookupRoleArn?: string; + /** + * External ID to use when assuming lookup role + * + * @default - No external ID + */ + readonly lookupRoleExternalId?: string; + + /** + * Use the bootstrapped lookup role for (read-only) stack operations + * + * Use the lookup role when performing a `cdk diff`. If set to `false`, the + * `deploy role` credentials will be used to perform a `cdk diff`. + * + * Requires bootstrap stack version 8. 
+ * + * @default true + */ + readonly useLookupRoleForStackOperations?: boolean; + /** * External ID to use when assuming role for image asset publishing * @@ -269,6 +294,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { private fileAssetPublishingRoleArn?: string; private imageAssetPublishingRoleArn?: string; private lookupRoleArn?: string; + private useLookupRoleForStackOperations: boolean; private qualifier?: string; private bucketPrefix?: string; private dockerTagPrefix?: string; @@ -279,6 +305,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { constructor(private readonly props: DefaultStackSynthesizerProps = {}) { super(); + this.useLookupRoleForStackOperations = props.useLookupRoleForStackOperations ?? true; for (const key in props) { if (props.hasOwnProperty(key)) { @@ -453,6 +480,12 @@ export class DefaultStackSynthesizer extends StackSynthesizer { requiresBootstrapStackVersion: MIN_BOOTSTRAP_STACK_VERSION, bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter, additionalDependencies: [artifactId], + lookupRole: this.useLookupRoleForStackOperations && this.lookupRoleArn ? 
{ + arn: this.lookupRoleArn, + assumeRoleExternalId: this.props.lookupRoleExternalId, + requiresBootstrapStackVersion: MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION, + bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter, + } : undefined, }); } diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts index 3b283eaae24ce..ea7c7745f2419 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts @@ -1,3 +1,4 @@ +import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets'; import { ISynthesisSession } from '../construct-compat'; import { Stack } from '../stack'; @@ -100,6 +101,13 @@ export interface SynthesizeStackArtifactOptions { */ readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target AWS account + * + * @default - None + */ + readonly lookupRole?: cxschema.BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * diff --git a/packages/@aws-cdk/custom-resources/README.md b/packages/@aws-cdk/custom-resources/README.md index 890255ee7bb12..cb30aada65d7f 100644 --- a/packages/@aws-cdk/custom-resources/README.md +++ b/packages/@aws-cdk/custom-resources/README.md @@ -31,14 +31,9 @@ with a `CustomResource` and a user-provided AWS Lambda function which implements the actual handler. ```ts -import { CustomResource } from '@aws-cdk/core'; -import * as logs from '@aws-cdk/aws-logs'; -import * as iam from '@aws-cdk/aws-iam'; -import * as cr from '@aws-cdk/custom-resources'; - -const onEvent = new lambda.Function(this, 'MyHandler', { /* ... */ }); - -const myRole = new iam.Role(this, 'MyRole', { /* ... 
*/ }); +declare const onEvent: lambda.Function; +declare const isComplete: lambda.Function; +declare const myRole: iam.Role; const myProvider = new cr.Provider(this, 'MyProvider', { onEventHandler: onEvent, @@ -275,10 +270,12 @@ to all buckets: ```ts new lambda.Function(this, 'OnEventHandler', { - // ... + runtime: lambda.Runtime.NODEJS_14_X, + handler: 'index.handler', + code: lambda.Code.fromInline('my code'), initialPolicy: [ - new iam.PolicyStatement({ actions: [ 's3:GetObject*' ], resources: [ '*' ] }) - ] + new iam.PolicyStatement({ actions: [ 's3:GetObject*' ], resources: [ '*' ] }), + ], }); ``` @@ -309,12 +306,15 @@ The following example will create the file `folder/file1.txt` inside `myBucket` with the contents `hello!`. -```ts -new S3File(this, 'MyFile', { +```plaintext +// This example exists only for TypeScript + +declare const myBucket: s3.Bucket; +new cr.S3File(this, 'MyFile', { bucket: myBucket, objectKey: 'folder/file1.txt', // optional content: 'hello!', - public: true // optional + public: true, // optional }); ``` @@ -334,11 +334,14 @@ Checks that the textual contents of an S3 object matches a certain value. The ch The following example defines an `S3Assert` resource which waits until `myfile.txt` in `myBucket` exists and includes the contents `foo bar`: -```ts -new S3Assert(this, 'AssertMyFile', { +```plaintext +// This example exists only for TypeScript + +declare const myBucket: s3.Bucket; +new cr.S3Assert(this, 'AssertMyFile', { bucket: myBucket, objectKey: 'myfile.txt', - expectedContent: 'foo bar' + expectedContent: 'foo bar', }); ``` @@ -356,7 +359,9 @@ stacks it may be useful to manually set a name for the Provider Function Lambda have a predefined service token ARN. ```ts - +declare const onEvent: lambda.Function; +declare const isComplete: lambda.Function; +declare const myRole: iam.Role; const myProvider = new cr.Provider(this, 'MyProvider', { onEventHandler: onEvent, isCompleteHandler: isComplete, @@ -409,26 +414,30 @@ resources. 
Chained API calls can be achieved by creating dependencies: ```ts -const awsCustom1 = new AwsCustomResource(this, 'API1', { +const awsCustom1 = new cr.AwsCustomResource(this, 'API1', { onCreate: { service: '...', action: '...', - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); -const awsCustom2 = new AwsCustomResource(this, 'API2', { +const awsCustom2 = new cr.AwsCustomResource(this, 'API2', { onCreate: { service: '...', - action: '...' + action: '...', parameters: { - text: awsCustom1.getResponseField('Items.0.text') + text: awsCustom1.getResponseField('Items.0.text'), }, - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) -}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), +}); ``` ### Physical Resource Id Parameter @@ -436,24 +445,26 @@ const awsCustom2 = new AwsCustomResource(this, 'API2', { Some AWS APIs may require passing the physical resource id in as a parameter for doing updates and deletes. You can pass it by using `PhysicalResourceIdReference`. ```ts -const awsCustom = new AwsCustomResource(this, '...', { +const awsCustom = new cr.AwsCustomResource(this, 'aws-custom', { onCreate: { service: '...', - action: '...' + action: '...', parameters: { - text: '...' + text: '...', }, - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, onUpdate: { service: '...', - action: '...'. 
+ action: '...', parameters: { text: '...', - resourceId: new PhysicalResourceIdReference() - } + resourceId: new cr.PhysicalResourceIdReference(), + }, }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }) ``` @@ -476,13 +487,16 @@ Use the `role`, `timeout`, `logRetention` and `functionName` properties to custo the Lambda function implementing the custom resource: ```ts -new AwsCustomResource(this, 'Customized', { - // other props here +declare const myRole: iam.Role; +new cr.AwsCustomResource(this, 'Customized', { role: myRole, // must be assumable by the `lambda.amazonaws.com` service principal - timeout: cdk.Duration.minutes(10) // defaults to 2 minutes - logRetention: logs.RetentionDays.ONE_WEEK // defaults to never delete logs + timeout: Duration.minutes(10), // defaults to 2 minutes + logRetention: logs.RetentionDays.ONE_WEEK, // defaults to never delete logs functionName: 'my-custom-name', // defaults to a CloudFormation generated name -}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), +}); ``` ### Restricting the output of the Custom Resource @@ -492,17 +506,19 @@ objects. 
If your API call returns an object that exceeds this limit, you can res the data returned by the custom resource to specific paths in the API response: ```ts -new AwsCustomResource(stack, 'ListObjects', { +new cr.AwsCustomResource(this, 'ListObjects', { onCreate: { service: 's3', action: 'listObjectsV2', parameters: { Bucket: 'my-bucket', }, - physicalResourceId: PhysicalResourceId.of('id'), + physicalResourceId: cr.PhysicalResourceId.of('id'), outputPaths: ['Contents.0.Key', 'Contents.1.Key'], // Output only the two first keys }, - policy: AwsCustomResourcePolicy.fromSdkCalls({ resources: AwsCustomResourcePolicy.ANY_RESOURCE }), + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); ``` @@ -514,49 +530,56 @@ path in `PhysicalResourceId.fromResponse()`. #### Verify a domain with SES ```ts -const verifyDomainIdentity = new AwsCustomResource(this, 'VerifyDomainIdentity', { +import * as route53 from '@aws-cdk/aws-route53'; + +const verifyDomainIdentity = new cr.AwsCustomResource(this, 'VerifyDomainIdentity', { onCreate: { service: 'SES', action: 'verifyDomainIdentity', parameters: { - Domain: 'example.com' + Domain: 'example.com', }, - physicalResourceId: PhysicalResourceId.fromResponse('VerificationToken') // Use the token returned by the call as physical id + physicalResourceId: cr.PhysicalResourceId.fromResponse('VerificationToken'), // Use the token returned by the call as physical id }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); +declare const zone: route53.HostedZone; new route53.TxtRecord(this, 'SESVerificationRecord', { zone, recordName: `_amazonses.example.com`, - values: [verifyDomainIdentity.getResponseField('VerificationToken')] + values: [verifyDomainIdentity.getResponseField('VerificationToken')], }); ``` #### Get the latest 
version of a secure SSM parameter ```ts -const getParameter = new AwsCustomResource(this, 'GetParameter', { +const getParameter = new cr.AwsCustomResource(this, 'GetParameter', { onUpdate: { // will also be called for a CREATE event service: 'SSM', action: 'getParameter', parameters: { Name: 'my-parameter', - WithDecryption: true + WithDecryption: true, }, - physicalResourceId: PhysicalResourceId.of(Date.now().toString()) // Update physical id to always fetch the latest version + physicalResourceId: cr.PhysicalResourceId.of(Date.now().toString()), // Update physical id to always fetch the latest version }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); // Use the value in another construct with -getParameter.getResponseField('Parameter.Value') +getParameter.getResponseField('Parameter.Value'); ``` #### Associate a PrivateHostedZone with VPC shared from another account ```ts -const getParameter = new AwsCustomResource(this, 'AssociateVPCWithHostedZone', { +const getParameter = new cr.AwsCustomResource(this, 'AssociateVPCWithHostedZone', { onCreate: { assumedRoleArn: 'arn:aws:iam::OTHERACCOUNT:role/CrossAccount/ManageHostedZoneConnections', service: 'Route53', @@ -564,16 +587,17 @@ const getParameter = new AwsCustomResource(this, 'AssociateVPCWithHostedZone', { parameters: { HostedZoneId: 'hz-123', VPC: { - VPCId: 'vpc-123', - VPCRegion: 'region-for-vpc' - } + VPCId: 'vpc-123', + VPCRegion: 'region-for-vpc', + }, }, - physicalResourceId: PhysicalResourceId.of('${vpcStack.SharedVpc.VpcId}-${vpcStack.Region}-${PrivateHostedZone.HostedZoneId}') + physicalResourceId: cr.PhysicalResourceId.of('${vpcStack.SharedVpc.VpcId}-${vpcStack.Region}-${PrivateHostedZone.HostedZoneId}'), }, //Will ignore any resource and use the assumedRoleArn as resource and 'sts:AssumeRole' for service:action - policy: 
AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); - ``` --- diff --git a/packages/@aws-cdk/custom-resources/package.json b/packages/@aws-cdk/custom-resources/package.json index e14d766ee5367..7a6064ef86ecb 100644 --- a/packages/@aws-cdk/custom-resources/package.json +++ b/packages/@aws-cdk/custom-resources/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture b/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..b80888ebeedd0 --- /dev/null +++ b/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture @@ -0,0 +1,16 @@ +// Fixture with packages imported, but nothing else +import { Construct } from 'constructs'; +import { CustomResource, Duration, Stack } from '@aws-cdk/core'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as iam from '@aws-cdk/aws-iam'; +import * as cr from '@aws-cdk/custom-resources'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as logs from '@aws-cdk/aws-logs'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts index 225f256e85f5f..66fc309a2593c 100644 --- a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts +++ b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts @@ -75,6 +75,13 @@ export class CloudFormationStackArtifact extends CloudArtifact { */ public readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target 
AWS account + * + * @default - No role is assumed (current credentials are used) + */ + public readonly lookupRole?: cxschema.BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * @@ -135,6 +142,7 @@ export class CloudFormationStackArtifact extends CloudArtifact { this.bootstrapStackVersionSsmParameter = properties.bootstrapStackVersionSsmParameter; this.terminationProtection = properties.terminationProtection; this.validateOnSynth = properties.validateOnSynth; + this.lookupRole = properties.lookupRole; this.stackName = properties.stackName || artifactId; this.assets = this.findMetadataByType(cxschema.ArtifactMetadataEntryType.ASSET).map(e => e.data as cxschema.AssetMetadataEntry); diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts index 0da0b027bbc65..ad2af9f62ef61 100644 --- a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts +++ b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts @@ -77,6 +77,33 @@ export interface SdkHttpOptions { const CACHED_ACCOUNT = Symbol('cached_account'); const CACHED_DEFAULT_CREDENTIALS = Symbol('cached_default_credentials'); +/** + * SDK configuration for a given environment + * 'forEnvironment' will attempt to assume a role and if it + * is not successful, then it will either: + * 1. Check to see if the default credentials (local credentials the CLI was executed with) + * are for the given environment. If they are then return those. + * 2. If the default credentials are not for the given environment then + * throw an error + * + * 'didAssumeRole' allows callers to know whether they are receiving the assume role + * credentials or the default credentials. + */ +export interface SdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * Whether or not the assume role was successful. 
+ * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + /** * Creates instances of the AWS SDK appropriate for a given account/region. * @@ -140,7 +167,11 @@ export class SdkProvider { * * The `environment` parameter is resolved first (see `resolveEnvironment()`). */ - public async forEnvironment(environment: cxapi.Environment, mode: Mode, options?: CredentialsOptions): Promise { + public async forEnvironment( + environment: cxapi.Environment, + mode: Mode, + options?: CredentialsOptions, + ): Promise { const env = await this.resolveEnvironment(environment); const baseCreds = await this.obtainBaseCredentials(env.account, mode); @@ -151,7 +182,7 @@ export class SdkProvider { // account. if (options?.assumeRoleArn === undefined) { if (baseCreds.source === 'incorrectDefault') { throw new Error(fmtObtainCredentialsError(env.account, baseCreds)); } - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } // We will proceed to AssumeRole using whatever we've been given. @@ -161,7 +192,7 @@ export class SdkProvider { // we can determine whether the AssumeRole call succeeds or not. try { await sdk.forceCredentialRetrieval(); - return sdk; + return { sdk, didAssumeRole: true }; } catch (e) { // AssumeRole failed. Proceed and warn *if and only if* the baseCredentials were already for the right account // or returned from a plugin. This is to cover some current setups for people using plugins or preferring to @@ -170,7 +201,7 @@ export class SdkProvider { if (baseCreds.source === 'correctDefault' || baseCreds.source === 'plugin') { debug(e.message); warning(`${fmtObtainedCredentials(baseCreds)} could not be used to assume '${options.assumeRoleArn}', but are for the right account. 
Proceeding anyway.`); - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } throw e; diff --git a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts index 49f97e71332c3..8cece9d8eed30 100644 --- a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts +++ b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts @@ -27,7 +27,7 @@ export class BootstrapStack { toolkitStackName = toolkitStackName ?? DEFAULT_TOOLKIT_STACK_NAME; const resolvedEnvironment = await sdkProvider.resolveEnvironment(environment); - const sdk = await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting); + const sdk = (await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting)).sdk; const currentToolkitInfo = await ToolkitInfo.lookup(resolvedEnvironment, sdk, toolkitStackName); diff --git a/packages/aws-cdk/lib/api/cloudformation-deployments.ts b/packages/aws-cdk/lib/api/cloudformation-deployments.ts index fb7c5410faf3d..c461c5ac24dc5 100644 --- a/packages/aws-cdk/lib/api/cloudformation-deployments.ts +++ b/packages/aws-cdk/lib/api/cloudformation-deployments.ts @@ -1,9 +1,9 @@ import * as cxapi from '@aws-cdk/cx-api'; import { AssetManifest } from 'cdk-assets'; import { Tag } from '../cdk-toolkit'; -import { debug } from '../logging'; +import { debug, warning } from '../logging'; import { publishAssets } from '../util/asset-publishing'; -import { Mode, SdkProvider } from './aws-auth'; +import { Mode, SdkProvider, ISDK } from './aws-auth'; import { deployStack, DeployStackResult, destroyStack } from './deploy-stack'; import { ToolkitInfo } from './toolkit-info'; import { CloudFormationStack, Template } from './util/cloudformation'; @@ -171,6 +171,54 @@ export interface ProvisionerProps { sdkProvider: SdkProvider; } +/** + * SDK obtained by assuming the lookup role + * for a given environment + */ 
+export interface PreparedSdkWithLookupRoleForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + + /** + * Whether or not the assume role was successful. + * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + +/** + * SDK obtained by assuming the deploy role + * for a given environment + */ +export interface PreparedSdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly stackSdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + /** + * The Execution Role that should be passed to CloudFormation. + * + * @default - no execution role is used + */ + readonly cloudFormationRoleArn?: string; +} + /** * Helper class for CloudFormation deployments * @@ -186,7 +234,19 @@ export class CloudFormationDeployments { public async readCurrentTemplate(stackArtifact: cxapi.CloudFormationStackArtifact): Promise