From 2ec294803427675b0ba594e929f32aca1ffdb075 Mon Sep 17 00:00:00 2001
From: Elad Ben-Israel
Date: Wed, 9 Dec 2020 19:14:04 +0200
Subject: [PATCH] fix(eks): kubectl provider out-of-memory for large manifests/charts (now 1GiB) (#11957)

Increase the default memory size of the kubectl provider's lambda function
to 1GiB and introduce a `kubectlMemory` option that can be used to control
memory allocation if needed.

Fixes #11787

----

*By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache-2.0 license*
---
 packages/@aws-cdk/aws-eks/README.md           | 25 ++++++++++-
 packages/@aws-cdk/aws-eks/lib/cluster.ts      | 34 ++++++++++++--
 .../@aws-cdk/aws-eks/lib/kubectl-provider.ts  |  3 +-
 .../@aws-cdk/aws-eks/test/test.cluster.ts     | 44 +++++++++++++++++--
 4 files changed, 97 insertions(+), 9 deletions(-)

diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md
index 1b09a3ac995e5..cf08a43a16bfe 100644
--- a/packages/@aws-cdk/aws-eks/README.md
+++ b/packages/@aws-cdk/aws-eks/README.md
@@ -450,7 +450,11 @@ The `ClusterHandler` is a Lambda function responsible to interact the EKS API i
 
 ### Kubectl Support
 
-The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
+The resources are created in the cluster by running `kubectl apply` from a python lambda function.
+
+#### Environment
+
+You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```ts
 const cluster = new eks.Cluster(this, 'hello-eks', {
@@ -461,6 +465,8 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 });
 ```
 
+#### Runtime
+
 By default, the `kubectl`, `helm` and `aws` commands used to operate the cluster are provided by an AWS Lambda Layer from the AWS Serverless Application in [aws-lambda-layer-kubectl](https://github.com/aws-samples/aws-lambda-layer-kubectl). In most cases this should be sufficient.
 
 You can provide a custom layer in case the default layer does not meet your
@@ -496,6 +502,23 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
 > Instructions on how to build `layer.zip` can be found
 > [here](https://github.com/aws-samples/aws-lambda-layer-kubectl/blob/master/cdk/README.md).
 
+#### Memory
+
+By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:
+
+```ts
+import { Size } from '@aws-cdk/core';
+
+new eks.Cluster(this, 'MyCluster', {
+  kubectlMemory: Size.gibibytes(4)
+});
+
+// or
+eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
+  kubectlMemory: Size.gibibytes(4)
+});
+```
+
 ### ARM64 Support
 
 Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest
diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts
index 54cbeb9379d02..beee73cca05dd 100644
--- a/packages/@aws-cdk/aws-eks/lib/cluster.ts
+++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts
@@ -6,7 +6,7 @@ import * as iam from '@aws-cdk/aws-iam';
 import * as kms from '@aws-cdk/aws-kms';
 import * as lambda from '@aws-cdk/aws-lambda';
 import * as ssm from '@aws-cdk/aws-ssm';
-import { Annotations, CfnOutput, CfnResource, IResource, Resource, Stack, Tags, Token, Duration } from '@aws-cdk/core';
+import { Annotations, CfnOutput, CfnResource, IResource, Resource, Stack, Tags, Token, Duration, Size } from '@aws-cdk/core';
 import { Construct, Node } from 'constructs';
 import * as YAML from 'yaml';
 import { AwsAuth } from './aws-auth';
@@ -92,14 +92,13 @@ export interface ICluster extends IResource, ec2.IConnectable {
 
   /**
    * Custom environment variables when running `kubectl` against this cluster.
-   * @default - no additional environment variables
    */
   readonly kubectlEnvironment?: { [key: string]: string };
 
   /**
    * A security group to use for `kubectl` execution.
    *
-   * @default - If not specified, the k8s endpoint is expected to be accessible
+   * If this is undefined, the k8s endpoint is expected to be accessible
    * publicly.
    */
   readonly kubectlSecurityGroup?: ec2.ISecurityGroup;
@@ -107,7 +106,7 @@ export interface ICluster extends IResource, ec2.IConnectable {
   /**
    * Subnets to host the `kubectl` compute resources.
    *
-   * @default - If not specified, the k8s endpoint is expected to be accessible
+   * If this is undefined, the k8s endpoint is expected to be accessible
    * publicly.
    */
   readonly kubectlPrivateSubnets?: ec2.ISubnet[];
@@ -119,6 +118,10 @@ export interface ICluster extends IResource, ec2.IConnectable {
    */
   readonly kubectlLayer?: lambda.ILayerVersion;
 
+  /**
+   * Amount of memory to allocate to the provider's lambda function.
+   */
+  readonly kubectlMemory?: Size;
   /**
    * Creates a new service account with corresponding IAM Role (IRSA).
    *
@@ -271,6 +274,13 @@ export interface ClusterAttributes {
    * @see https://github.com/aws-samples/aws-lambda-layer-kubectl
    */
   readonly kubectlLayer?: lambda.ILayerVersion;
+
+  /**
+   * Amount of memory to allocate to the provider's lambda function.
+   *
+   * @default Size.gibibytes(1)
+   */
+  readonly kubectlMemory?: Size;
 }
 
 /**
@@ -416,6 +426,13 @@ export interface ClusterOptions extends CommonClusterOptions {
    * @see https://github.com/aws-samples/aws-lambda-layer-kubectl
    */
   readonly kubectlLayer?: lambda.ILayerVersion;
+
+  /**
+   * Amount of memory to allocate to the provider's lambda function.
+   *
+   * @default Size.gibibytes(1)
+   */
+  readonly kubectlMemory?: Size;
 }
 
 /**
@@ -630,6 +647,7 @@ abstract class ClusterBase extends Resource implements ICluster {
   public abstract readonly kubectlEnvironment?: { [key: string]: string };
   public abstract readonly kubectlSecurityGroup?: ec2.ISecurityGroup;
   public abstract readonly kubectlPrivateSubnets?: ec2.ISubnet[];
+  public abstract readonly kubectlMemory?: Size;
   public abstract readonly openIdConnectProvider: iam.IOpenIdConnectProvider;
 
   /**
@@ -842,6 +860,11 @@ export class Cluster extends ClusterBase {
    */
   public readonly kubectlLayer?: lambda.ILayerVersion;
 
+  /**
+   * The amount of memory allocated to the kubectl provider's lambda function.
+   */
+  public readonly kubectlMemory?: Size;
+
   /**
    * If this cluster is kubectl-enabled, returns the `ClusterResource` object
    * that manages it. If this cluster is not kubectl-enabled (i.e. uses the
@@ -929,6 +952,7 @@ export class Cluster extends ClusterBase {
     this.endpointAccess = props.endpointAccess ?? EndpointAccess.PUBLIC_AND_PRIVATE;
     this.kubectlEnvironment = props.kubectlEnvironment;
     this.kubectlLayer = props.kubectlLayer;
+    this.kubectlMemory = props.kubectlMemory;
 
     const privateSubents = this.selectPrivateSubnets().slice(0, 16);
     const publicAccessDisabled = !this.endpointAccess._config.publicAccess;
@@ -1630,6 +1654,7 @@ class ImportedCluster extends ClusterBase {
   public readonly kubectlSecurityGroup?: ec2.ISecurityGroup | undefined;
   public readonly kubectlPrivateSubnets?: ec2.ISubnet[] | undefined;
   public readonly kubectlLayer?: lambda.ILayerVersion;
+  public readonly kubectlMemory?: Size;
 
   constructor(scope: Construct, id: string, private readonly props: ClusterAttributes) {
     super(scope, id);
@@ -1641,6 +1666,7 @@ class ImportedCluster extends ClusterBase {
     this.kubectlEnvironment = props.kubectlEnvironment;
     this.kubectlPrivateSubnets = props.kubectlPrivateSubnetIds ? props.kubectlPrivateSubnetIds.map((subnetid, index) => ec2.Subnet.fromSubnetId(this, `KubectlSubnet${index}`, subnetid)) : undefined;
     this.kubectlLayer = props.kubectlLayer;
+    this.kubectlMemory = props.kubectlMemory;
 
     let i = 1;
     for (const sgid of props.securityGroupIds ?? []) {
diff --git a/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts b/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
index be23ac355c42e..4cf2d254099f6 100644
--- a/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
+++ b/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
@@ -67,6 +67,7 @@ export class KubectlProvider extends NestedStack {
     }
 
     const layer = cluster.kubectlLayer ?? getOrCreateKubectlLayer(this);
+    const memorySize = cluster.kubectlMemory ? cluster.kubectlMemory.toMebibytes() : 1024;
 
     const handler = new lambda.Function(this, 'Handler', {
       code: lambda.Code.fromAsset(path.join(__dirname, 'kubectl-handler')),
@@ -75,7 +76,7 @@
       timeout: Duration.minutes(15),
       description: 'onEvent handler for EKS kubectl resource provider',
       layers: [layer],
-      memorySize: 256,
+      memorySize,
       environment: cluster.kubectlEnvironment,
 
       // defined only when using private access
diff --git a/packages/@aws-cdk/aws-eks/test/test.cluster.ts b/packages/@aws-cdk/aws-eks/test/test.cluster.ts
index facdf1880d426..409fb829bbc93 100644
--- a/packages/@aws-cdk/aws-eks/test/test.cluster.ts
+++ b/packages/@aws-cdk/aws-eks/test/test.cluster.ts
@@ -12,7 +12,7 @@ import * as constructs from 'constructs';
 import { Test } from 'nodeunit';
 import * as YAML from 'yaml';
 import * as eks from '../lib';
-import { getOrCreateKubectlLayer } from '../lib/kubectl-provider';
+import * as kubectl from '../lib/kubectl-provider';
 import { BottleRocketImage } from '../lib/private/bottlerocket';
 import { testFixture, testFixtureNoVpc } from './util';
 
@@ -391,7 +391,7 @@ export = {
     // WHEN
     const vpc = new ec2.Vpc(stack, 'VPC');
     new eks.Cluster(stack, 'Cluster', { vpc, defaultCapacity: 0, version: CLUSTER_VERSION });
-    getOrCreateKubectlLayer(stack);
+    kubectl.getOrCreateKubectlLayer(stack);
 
     // THEN
     expect(stack).to(haveResource('Custom::AWSCDK-EKS-Cluster'));
@@ -411,7 +411,7 @@ export = {
     // WHEN
     const vpc = new ec2.Vpc(stack, 'VPC');
     new eks.Cluster(stack, 'Cluster', { vpc, defaultCapacity: 0, version: CLUSTER_VERSION });
-    getOrCreateKubectlLayer(stack);
+    kubectl.getOrCreateKubectlLayer(stack);
 
     // THEN
     expect(stack).to(haveResource('Custom::AWSCDK-EKS-Cluster'));
@@ -2583,4 +2583,42 @@ export = {
     }));
     test.done();
   },
+
+  'custom memory size for kubectl provider'(test: Test) {
+    // GIVEN
+    const { stack, vpc, app } = testFixture();
+
+    // WHEN
+    new eks.Cluster(stack, 'Cluster', {
+      vpc,
+      version: CLUSTER_VERSION,
+      kubectlMemory: cdk.Size.gibibytes(2),
+    });
+
+    // THEN
+    const casm = app.synth();
+    const providerNestedStackTemplate = JSON.parse(fs.readFileSync(path.join(casm.directory, 'StackawscdkawseksKubectlProvider7346F799.nested.template.json'), 'utf-8'));
+    test.equal(providerNestedStackTemplate?.Resources?.Handler886CB40B?.Properties?.MemorySize, 2048);
+    test.done();
+  },
+
+  'custom memory size for imported clusters'(test: Test) {
+    // GIVEN
+    const { stack, app } = testFixture();
+
+    // WHEN
+    const cluster = eks.Cluster.fromClusterAttributes(stack, 'Imported', {
+      clusterName: 'my-cluster',
+      kubectlRoleArn: 'arn:aws:iam::123456789012:role/MyRole',
+      kubectlMemory: cdk.Size.gibibytes(4),
+    });
+
+    cluster.addManifest('foo', { bar: 123 });
+
+    // THEN
+    const casm = app.synth();
+    const providerNestedStackTemplate = JSON.parse(fs.readFileSync(path.join(casm.directory, 'StackStackImported1CBA9C50KubectlProviderAA00BA49.nested.template.json'), 'utf-8'));
+    test.equal(providerNestedStackTemplate?.Resources?.Handler886CB40B?.Properties?.MemorySize, 4096);
+    test.done();
+  },
 };
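
For illustration only (not part of the patch): a minimal sketch of how the new `kubectlMemory` option resolves to the Lambda function's memory size. The `HasKubectlMemory` interface and `resolveKubectlMemory` helper below are hypothetical stand-ins introduced just for this sketch; only `Size` from `@aws-cdk/core` is the real API the change relies on.

```ts
import { Size } from '@aws-cdk/core';

// Hypothetical stand-in for the single ICluster field this sketch needs.
interface HasKubectlMemory {
  readonly kubectlMemory?: Size;
}

// Mirrors the provider's fallback: the caller-supplied size converted to MiB,
// or 1024 MiB when `kubectlMemory` is not set.
function resolveKubectlMemory(cluster: HasKubectlMemory): number {
  return cluster.kubectlMemory ? cluster.kubectlMemory.toMebibytes() : 1024;
}

// Size.gibibytes(2) -> 2048 and Size.gibibytes(4) -> 4096, the MemorySize
// values the new tests assert on the provider's nested stack template.
console.log(resolveKubectlMemory({ kubectlMemory: Size.gibibytes(2) })); // 2048
console.log(resolveKubectlMemory({}));                                   // 1024
```

Keeping the fallback at 1024 MiB means clusters that never set `kubectlMemory` still get the larger default this change introduces, while users applying very large manifests or charts can raise it further.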