Merge branch 'master' into fix/lambda-python/bundling
mergify[bot] authored Jan 10, 2022
2 parents beccc6d + a70a2e6 commit 94633a6
Showing 73 changed files with 1,211 additions and 425 deletions.
22 changes: 13 additions & 9 deletions packages/@aws-cdk/aws-backup/README.md
@@ -32,7 +32,8 @@ const plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan');

Assigning resources to a plan can be done with `addSelection()`:

```ts fixture=with-plan
```ts
declare const plan: backup.BackupPlan;
const myTable = dynamodb.Table.fromTableName(this, 'Table', 'myTableName');
const myCoolConstruct = new Construct(this, 'MyCoolConstruct');

@@ -50,16 +51,17 @@ created for the selection. The `BackupSelection` implements `IGrantable`.

To add rules to a plan, use `addRule()`:

```ts fixture=with-plan
```ts
declare const plan: backup.BackupPlan;
plan.addRule(new backup.BackupPlanRule({
completionWindow: Duration.hours(2),
startWindow: Duration.hours(1),
scheduleExpression: events.Schedule.cron({ // Only cron expressions are supported
day: '15',
hour: '3',
minute: '30'
minute: '30',
}),
moveToColdStorageAfter: Duration.days(30)
moveToColdStorageAfter: Duration.days(30),
}));
```

@@ -69,7 +71,8 @@ If no value is specified, the retention period is set to 35 days which is the ma
Property `moveToColdStorageAfter` must not be specified because PITR does not support this option.
This example defines an AWS Backup rule with PITR and a retention period set to 14 days:

```ts fixture=with-plan
```ts
declare const plan: backup.BackupPlan;
plan.addRule(new backup.BackupPlanRule({
enableContinuousBackup: true,
deleteAfter: Duration.days(14),
@@ -78,7 +81,8 @@ plan.addRule(new backup.BackupPlanRule({

Ready-made rules are also available:

```ts fixture=with-plan
```ts
declare const plan: backup.BackupPlan;
plan.addRule(backup.BackupPlanRule.daily());
plan.addRule(backup.BackupPlanRule.weekly());
```
@@ -152,7 +156,7 @@ const vault = new backup.BackupVault(this, 'Vault', {
},
}),
],
});
}),
})
```

@@ -166,8 +170,8 @@ new backup.BackupVault(this, 'Vault', {
blockRecoveryPointDeletion: true,
});

const plan = backup.BackupPlan.dailyMonthly1YearRetention(this, 'Plan');
plan.backupVault.blockRecoveryPointDeletion();
declare const backupVault: backup.BackupVault;
backupVault.blockRecoveryPointDeletion();
```

By default access is not restricted.
9 changes: 8 additions & 1 deletion packages/@aws-cdk/aws-backup/package.json
@@ -28,7 +28,14 @@
]
}
},
"projectReferences": true
"projectReferences": true,
"metadata": {
"jsii": {
"rosetta": {
"strict": true
}
}
}
},
"repository": {
"type": "git",
2 changes: 2 additions & 0 deletions packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture
@@ -3,6 +3,8 @@ import { Duration, RemovalPolicy, Stack } from '@aws-cdk/core';
import { Construct } from 'constructs';
import * as backup from '@aws-cdk/aws-backup';
import * as iam from '@aws-cdk/aws-iam';
import * as dynamodb from '@aws-cdk/aws-dynamodb';
import * as events from '@aws-cdk/aws-events';
import * as kms from '@aws-cdk/aws-kms';
import * as sns from '@aws-cdk/aws-sns';

16 changes: 0 additions & 16 deletions packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture

This file was deleted.

20 changes: 12 additions & 8 deletions packages/@aws-cdk/aws-certificatemanager/README.md
@@ -40,9 +40,6 @@ If Amazon Route 53 is your DNS provider for the requested domain, the DNS record
created automatically:

```ts
import * as acm from '@aws-cdk/aws-certificatemanager';
import * as route53 from '@aws-cdk/aws-route53';

const myHostedZone = new route53.HostedZone(this, 'HostedZone', {
zoneName: 'example.com',
});
@@ -106,6 +103,7 @@ The `DnsValidatedCertificate` construct exists to facilitate creating these cert
Route53-based DNS validation.

```ts
declare const myHostedZone: route53.HostedZone;
new acm.DnsValidatedCertificate(this, 'CrossRegionCertificate', {
domainName: 'hello.example.com',
hostedZone: myHostedZone,
@@ -120,10 +118,10 @@ AWS Certificate Manager can create [private certificates](https://docs.aws.amazo
```ts
import * as acmpca from '@aws-cdk/aws-acmpca';

new acm.PrivateCertificate(stack, 'PrivateCertificate', {
new acm.PrivateCertificate(this, 'PrivateCertificate', {
domainName: 'test.example.com',
subjectAlternativeNames: ['cool.example.com', 'test.example.net'], // optional
certificateAuthority: acmpca.CertificateAuthority.fromCertificateAuthorityArn(stack, 'CA',
certificateAuthority: acmpca.CertificateAuthority.fromCertificateAuthorityArn(this, 'CA',
'arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/023077d8-2bfa-4eb0-8f22-05c96deade77'),
});
```
@@ -134,7 +132,7 @@ If you want to import an existing certificate, you can do so from its ARN:

```ts
const arn = 'arn:aws:...';
const certificate = Certificate.fromCertificateArn(this, 'Certificate', arn);
const certificate = acm.Certificate.fromCertificateArn(this, 'Certificate', arn);
```

## Sharing between Stacks
@@ -152,8 +150,14 @@ An alarm can be created to determine whether a certificate is soon due for
renewal using the following code:

```ts
const certificate = new Certificate(this, 'Certificate', { /* ... */ });
certificate.metricDaysToExpiry().createAlarm({
import * as cloudwatch from '@aws-cdk/aws-cloudwatch';

declare const myHostedZone: route53.HostedZone;
const certificate = new acm.Certificate(this, 'Certificate', {
domainName: 'hello.example.com',
validation: acm.CertificateValidation.fromDns(myHostedZone),
});
certificate.metricDaysToExpiry().createAlarm(this, 'Alarm', {
comparisonOperator: cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,
evaluationPeriods: 1,
threshold: 45, // Automatic rotation happens between 60 and 45 days before expiry
9 changes: 8 additions & 1 deletion packages/@aws-cdk/aws-certificatemanager/package.json
@@ -28,7 +28,14 @@
]
}
},
"projectReferences": true
"projectReferences": true,
"metadata": {
"jsii": {
"rosetta": {
"strict": true
}
}
}
},
"repository": {
"type": "git",
12 changes: 12 additions & 0 deletions packages/@aws-cdk/aws-certificatemanager/rosetta/default.ts-fixture
@@ -0,0 +1,12 @@
// Fixture with packages imported, but nothing else
import { Stack } from '@aws-cdk/core';
import { Construct } from 'constructs';
import * as acm from '@aws-cdk/aws-certificatemanager';
import * as route53 from '@aws-cdk/aws-route53';

class Fixture extends Stack {
constructor(scope: Construct, id: string) {
super(scope, id);
/// here
}
}
14 changes: 6 additions & 8 deletions packages/@aws-cdk/aws-cloudfront-origins/README.md
@@ -18,9 +18,6 @@ An S3 bucket can be added as an origin. If the bucket is configured as a website
documents.

```ts
import * as cloudfront from '@aws-cdk/aws-cloudfront';
import * as origins from '@aws-cdk/aws-cloudfront-origins';

const myBucket = new s3.Bucket(this, 'myBucket');
new cloudfront.Distribution(this, 'myDist', {
defaultBehavior: { origin: new origins.S3Origin(myBucket) },
@@ -38,9 +35,6 @@ URLs and not S3 URLs directly. Alternatively, a custom origin access identity ca
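As a minimal sketch of that alternative (assuming the `originAccessIdentity` prop of `S3Origin` and the `OriginAccessIdentity` construct from `@aws-cdk/aws-cloudfront`), an existing identity could be passed explicitly:

```ts
declare const myBucket: s3.Bucket;

// Sketch: reuse an existing origin access identity instead of the one
// S3Origin would create on its own (assumes the `originAccessIdentity` prop).
const originAccessIdentity = new cloudfront.OriginAccessIdentity(this, 'OriginAccessIdentity', {
  comment: 'Identity for my distribution',
});
new cloudfront.Distribution(this, 'myDist', {
  defaultBehavior: { origin: new origins.S3Origin(myBucket, { originAccessIdentity }) },
});
```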
You can configure CloudFront to add custom headers to the requests that it sends to your origin. These custom headers enable you to send and gather information from your origin that you don’t get with typical viewer requests. These headers can even be customized for each origin. CloudFront supports custom headers for both custom and Amazon S3 origins.

```ts
import * as cloudfront from '@aws-cdk/aws-cloudfront';
import * as origins from '@aws-cdk/aws-cloudfront-origins';

const myBucket = new s3.Bucket(this, 'myBucket');
new cloudfront.Distribution(this, 'myDist', {
defaultBehavior: { origin: new origins.S3Origin(myBucket, {
@@ -60,12 +54,12 @@ accessible (`internetFacing` is true). Both Application and Network load balance
import * as ec2 from '@aws-cdk/aws-ec2';
import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2';

const vpc = new ec2.Vpc(...);
declare const vpc: ec2.Vpc;
// Create an application load balancer in a VPC. 'internetFacing' must be 'true'
// for CloudFront to access the load balancer and use it as an origin.
const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {
vpc,
internetFacing: true
internetFacing: true,
});
new cloudfront.Distribution(this, 'myDist', {
defaultBehavior: { origin: new origins.LoadBalancerV2Origin(lb) },
Expand All @@ -75,6 +69,9 @@ new cloudfront.Distribution(this, 'myDist', {
The origin can also be customized to respond on different ports, have different connection properties, etc.

```ts
import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2';

declare const loadBalancer: elbv2.ApplicationLoadBalancer;
const origin = new origins.LoadBalancerV2Origin(loadBalancer, {
connectionAttempts: 3,
connectionTimeout: Duration.seconds(5),
@@ -103,6 +100,7 @@ CloudFront automatically switches to the secondary origin.
You achieve that behavior in the CDK using the `OriginGroup` class:

```ts
const myBucket = new s3.Bucket(this, 'myBucket');
new cloudfront.Distribution(this, 'myDist', {
defaultBehavior: {
origin: new origins.OriginGroup({
9 changes: 8 additions & 1 deletion packages/@aws-cdk/aws-cloudfront-origins/package.json
@@ -28,7 +28,14 @@
]
}
},
"projectReferences": true
"projectReferences": true,
"metadata": {
"jsii": {
"rosetta": {
"strict": true
}
}
}
},
"repository": {
"type": "git",
15 changes: 15 additions & 0 deletions packages/@aws-cdk/aws-cloudfront-origins/rosetta/default.ts-fixture
@@ -0,0 +1,15 @@
// Fixture with packages imported, but nothing else
import { Duration, Stack } from '@aws-cdk/core';
import { Construct } from 'constructs';
import * as cloudfront from '@aws-cdk/aws-cloudfront';
import * as origins from '@aws-cdk/aws-cloudfront-origins';
import * as s3 from '@aws-cdk/aws-s3';

class Fixture extends Stack {
constructor(scope: Construct, id: string) {
super(scope, id);

/// here

}
}
23 changes: 13 additions & 10 deletions packages/@aws-cdk/aws-cloudtrail/README.md
@@ -68,6 +68,8 @@ default retention setting. The following code enables sending CloudWatch logs bu
period for the created Log Group.

```ts
import * as logs from '@aws-cdk/aws-logs';

const trail = new cloudtrail.Trail(this, 'CloudTrail', {
sendToCloudWatchLogs: true,
cloudWatchLogsRetention: logs.RetentionDays.FOUR_MONTHS,
@@ -88,18 +90,18 @@ The following code filters events for S3 from a specific AWS account and trigger

```ts
const myFunctionHandler = new lambda.Function(this, 'MyFunction', {
code: lambda.Code.fromAsset('resource/myfunction');
code: lambda.Code.fromAsset('resource/myfunction'),
runtime: lambda.Runtime.NODEJS_12_X,
handler: 'index.handler',
});

const eventRule = Trail.onEvent(this, 'MyCloudWatchEvent', {
target: new eventTargets.LambdaFunction(myFunctionHandler),
const eventRule = cloudtrail.Trail.onEvent(this, 'MyCloudWatchEvent', {
target: new targets.LambdaFunction(myFunctionHandler),
});

eventRule.addEventPattern({
account: '123456789012',
source: 'aws.s3',
account: ['123456789012'],
source: ['aws.s3'],
});
```

@@ -141,7 +143,7 @@ The following code configures the `Trail` to only track management events that a
```ts
const trail = new cloudtrail.Trail(this, 'CloudTrail', {
// ...
managementEvents: ReadWriteType.READ_ONLY,
managementEvents: cloudtrail.ReadWriteType.READ_ONLY,
});
```

@@ -157,13 +159,14 @@ be used to configure logging of S3 data events for specific buckets and specific
configures logging of S3 data events for `fooBucket` and with object prefix `bar/`.

```ts
import * as cloudtrail from '@aws-cdk/aws-cloudtrail';
import * as s3 from '@aws-cdk/aws-s3';

const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail');
declare const bucket: s3.Bucket;

// Adds an event selector to the bucket foo
trail.addS3EventSelector([{
bucket: fooBucket, // 'fooBucket' is of type s3.IBucket
bucket,
objectPrefix: 'bar/',
}]);
```
@@ -174,12 +177,12 @@ configures logging of Lambda data events for a specific Function.

```ts
const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail');
const amazingFunction = new lambda.Function(stack, 'AnAmazingFunction', {
const amazingFunction = new lambda.Function(this, 'AnAmazingFunction', {
runtime: lambda.Runtime.NODEJS_12_X,
handler: "hello.handler",
code: lambda.Code.fromAsset("lambda"),
});

// Add an event selector to log data events for the provided Lambda functions.
trail.addLambdaEventSelector([ lambdaFunction ]);
trail.addLambdaEventSelector([ amazingFunction ]);
```
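The examples above target a single bucket or function; if data events should be captured for every Lambda function in the account, a minimal sketch along these lines could be used (assuming the `logAllLambdaDataEvents()` helper on `Trail`):

```ts
const broadTrail = new cloudtrail.Trail(this, 'AllLambdaDataEventsTrail');

// Sketch: opt the trail into data events for all Lambda functions
// (assumes the logAllLambdaDataEvents() helper exists on Trail).
broadTrail.logAllLambdaDataEvents();
```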
9 changes: 8 additions & 1 deletion packages/@aws-cdk/aws-cloudtrail/package.json
@@ -28,7 +28,14 @@
]
}
},
"projectReferences": true
"projectReferences": true,
"metadata": {
"jsii": {
"rosetta": {
"strict": true
}
}
}
},
"repository": {
"type": "git",
14 changes: 14 additions & 0 deletions packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture
@@ -0,0 +1,14 @@
// Fixture with packages imported, but nothing else
import { Stack } from '@aws-cdk/core';
import { Construct } from 'constructs';
import * as cloudtrail from '@aws-cdk/aws-cloudtrail';
import * as sns from '@aws-cdk/aws-sns';
import * as lambda from '@aws-cdk/aws-lambda';
import * as targets from '@aws-cdk/aws-events-targets';

class Fixture extends Stack {
constructor(scope: Construct, id: string) {
super(scope, id);
/// here
}
}