diff --git a/pkg/iac-providers/cft/v1/load-file.go b/pkg/iac-providers/cft/v1/load-file.go index 621307272..35283765d 100644 --- a/pkg/iac-providers/cft/v1/load-file.go +++ b/pkg/iac-providers/cft/v1/load-file.go @@ -93,16 +93,28 @@ func (a *CFTV1) extractTemplate(file string, data *[]byte) (*cloudformation.Temp switch fileExt { case YAMLExtension, YAMLExtension2: - template, err := goformation.ParseYAML(*data) + zap.S().Debug("sanitizing cft template file", zap.String("file", file)) + sanitized, err := a.sanitizeCftTemplate(*data, true) if err != nil { - zap.S().Debug("failed to parse file", zap.String("file", file)) + zap.S().Debug("failed to sanitize cft template file", zap.String("file", file), zap.Error(err)) + return nil, err + } + template, err := goformation.ParseYAML(sanitized) + if err != nil { + zap.S().Debug("failed to parse file", zap.String("file", file), zap.Error(err)) return nil, err } return template, nil case JSONExtension: - template, err := goformation.ParseJSON(*data) + zap.S().Debug("sanitizing cft template file", zap.String("file", file)) + sanitized, err := a.sanitizeCftTemplate(*data, false) + if err != nil { + zap.S().Debug("failed to sanitize cft template file", zap.String("file", file), zap.Error(err)) + return nil, err + } + template, err := goformation.ParseJSON(sanitized) if err != nil { - zap.S().Debug("failed to parse file", zap.String("file", file)) + zap.S().Debug("failed to parse file", zap.String("file", file), zap.Error(err)) return nil, err } return template, nil diff --git a/pkg/iac-providers/cft/v1/load-file_test.go b/pkg/iac-providers/cft/v1/load-file_test.go index 86068153f..bc2259b20 100644 --- a/pkg/iac-providers/cft/v1/load-file_test.go +++ b/pkg/iac-providers/cft/v1/load-file_test.go @@ -61,7 +61,7 @@ func TestLoadIacFile(t *testing.T) { filePath: "nonexistent.txt", typeOnly: false, }, { - wantErr: fmt.Errorf(testErrString3), + wantErr: fmt.Errorf("error while resolving intrinsic functions, error %w", fmt.Errorf(testErrString3)), want: output.AllResourceConfigs{}, cftv1: CFTV1{}, name: "invalid file", diff --git a/pkg/iac-providers/cft/v1/sanitize-cft-template.go b/pkg/iac-providers/cft/v1/sanitize-cft-template.go new file mode 100644 index 000000000..b2a0a3a81 --- /dev/null +++ b/pkg/iac-providers/cft/v1/sanitize-cft-template.go @@ -0,0 +1,370 @@ +/* + Copyright (C) 2020 Accurics, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cftv1 + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/awslabs/goformation/v4/cloudformation" + "github.com/awslabs/goformation/v4/cloudformation/policies" + "github.com/awslabs/goformation/v4/intrinsics" + "go.uber.org/zap" +) + +func (a *CFTV1) sanitizeCftTemplate(data []byte, isYAML bool) ([]byte, error) { + var ( + intrinsified []byte + err error + ) + + if isYAML { + // Process all AWS CloudFormation intrinsic functions (e.g. 
Fn::Join) + intrinsified, err = intrinsics.ProcessYAML(data, nil) + if err != nil { + return nil, fmt.Errorf("error while resolving intrinsic functions, error %w", err) + } + } else { + // Process all AWS CloudFormation intrinsic functions (e.g. Fn::Join) + intrinsified, err = intrinsics.ProcessJSON(data, nil) + if err != nil { + return nil, fmt.Errorf("error while resolving intrinsic functions, error %w", err) + } + } + + templateFileMap := make(map[string]interface{}) + + err = json.Unmarshal(intrinsified, &templateFileMap) + if err != nil { + return nil, err + } + + // sanitize Parameters + params, ok := templateFileMap["Parameters"] + if ok { + pMap, ok := params.(map[string]interface{}) + if ok { + for pName := range pMap { + zap.S().Debug(fmt.Sprintf("inspecting parameter '%s'", pName)) + inspectAndSanitizeParameters(pMap[pName]) + } + } + } + + // sanitize resources + r, ok := templateFileMap["Resources"] + if ok { + rMap, ok := r.(map[string]interface{}) + if ok { + for rName := range rMap { + zap.S().Debug("inspecting resource", zap.String("Resource Name", rName)) + if shouldRemoveResource := inspectAndSanitizeResource(rMap[rName]); shouldRemoveResource { + // we would remove any resource from the map for which goformation doesn't have a type defined + delete(rMap, rName) + } + } + } + } + + sanitized, err := json.Marshal(templateFileMap) + if err != nil { + return nil, err + } + return sanitized, nil +} + +func inspectAndSanitizeParameters(p interface{}) { + paramMap, ok := p.(map[string]interface{}) + if !ok { + zap.S().Debug("invalid data for 'Parameters', should be of type map[string]interface{}") + return + } + structFieldsMap := examineStruct(reflect.TypeOf(cloudformation.Parameter{})) + if structFieldsMap != nil { + for paramName := range paramMap { + v, ok := structFieldsMap[paramName] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s', not present in 'Parameter' struct fields", paramName)) + continue + } + val := fixWithType(paramMap[paramName], v.Type) + if val != nil { + paramMap[paramName] = val + } + } + } +} + +func inspectAndSanitizeResource(r interface{}) (shouldRemoveResource bool) { + resMap, ok := r.(map[string]interface{}) + if !ok { + zap.S().Debug("invalid data for 'Resource', should be of type map[string]interface{}") + return + } + + // get the type of the resource + t, ok := resMap["Type"] + if !ok { + zap.S().Debug("resource must have an attribute 'Type'") + return + } + + tVal, ok := t.(string) + if !ok { + zap.S().Debug("attribute 'Type' should be a string") + return + } + + goformationCftObj, ok := cloudformation.AllResources()[tVal] + if !ok { + shouldRemoveResource = true + zap.S().Debug(fmt.Sprintf("not goformation resource present for '%s'", tVal)) + return + } + + cftObjType := reflect.TypeOf(goformationCftObj) + // if the object is of pointer type, get type of its concrete value + if cftObjType.Kind() == reflect.Ptr { + cftObjType = cftObjType.Elem() + } + structFieldsMap := examineStruct(cftObjType) + if structFieldsMap != nil { + // sanitize the properties of the resource + prop, ok := resMap["Properties"] + if !ok { + zap.S().Debug("resource doesn't have 'Properties'") + return + } + + propMap, ok := prop.(map[string]interface{}) + if !ok { + zap.S().Debug("'Properties' should be of type map[string]interface{}") + return + } + + for propName := range propMap { + structField, ok := structFieldsMap[propName] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s', not present in '%s' struct fields", propName, tVal)) + continue + } + val := 
fixWithType(propMap[propName], structField.Type) + if val != nil { + propMap[propName] = val + } + } + + inspectAndSanitizeResourceAttributes(resMap) + } + return +} + +func inspectAndSanitizeResourceAttributes(resource map[string]interface{}) { + // every cft resource has 6 attributes as specified at https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-product-attribute-reference.html + + // sanitize CreationPolicy if present (CreationPolicy is an object) + cp, ok := resource["CreationPolicy"] + if ok { + cpMap, ok := cp.(map[string]interface{}) + if ok { + structFieldsMap := examineStruct(reflect.TypeOf(policies.CreationPolicy{})) + for k := range cpMap { + v, ok := structFieldsMap[k] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s' not present 'CreationPolicy' struct", k)) + continue + } + val := fixWithType(cpMap[k], v.Type) + if val != nil { + cpMap[k] = val + } + } + } + } + + // sanitize UpdatePolicy if present (UpdatePolicy is an object) + up, ok := resource["UpdatePolicy"] + if ok { + upMap, ok := up.(map[string]interface{}) + if ok { + structFieldsMap := examineStruct(reflect.TypeOf(policies.UpdatePolicy{})) + for k := range upMap { + v, ok := structFieldsMap[k] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s' not present 'UpdatePolicy' struct", k)) + continue + } + val := fixWithType(upMap[k], v.Type) + if val != nil { + upMap[k] = val + } + } + } + } + + // sanitize DependsOn if present (DependsOn is a slice) + d, ok := resource["DependsOn"] + if ok { + // check if DependsOn is a slice + _, ok = d.([]interface{}) + if !ok { + newVal := make([]interface{}, 0) + newVal = append(newVal, d) + resource["DependsOn"] = newVal + } + } + + // Metadata is of type map[string]interface{}, we do not need to sanitize + // DeletionPolicy is of type string, we do not need to sanitize + // UpdateReplacePolicy is of type string, we do not need to sanitize +} + +// fixWithType... 
tries to fix the orignal value based on type specified +// it doesn't try to fix, if type of original data is the type specified +func fixWithType(data interface{}, r reflect.Type) interface{} { + switch t := data.(type) { + case int, int8, int16, int32, int64: + val := t.(int) + switch r.Kind() { + case reflect.Float32, reflect.Float64: + return float64(val) + case reflect.String: + return strconv.Itoa(val) + case reflect.Ptr: + return fixWithType(data, r.Elem()) + } + case string: + switch r.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := strconv.ParseInt(t, 10, 64) + if err == nil { + return v + } + case reflect.Float32: + v, err := strconv.ParseFloat(t, 32) + if err == nil { + return v + } + case reflect.Float64: + v, err := strconv.ParseFloat(t, 64) + if err == nil { + return v + } + case reflect.Bool: + v, err := strconv.ParseBool(t) + if err == nil { + return v + } + case reflect.Ptr: + return fixWithType(data, r.Elem()) + } + case bool: + switch r.Kind() { + case reflect.String: + return strconv.FormatBool(t) + case reflect.Ptr: + return fixWithType(data, r.Elem()) + } + + case float32, float64: + val := t.(float64) + switch r.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int(val) + case reflect.String: + return strconv.FormatFloat(val, 'f', -1, 64) + case reflect.Ptr: + return fixWithType(data, r.Elem()) + } + + case []interface{}: + switch r.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr: + arr := []interface{}{} + for x := range t { + v := fixWithType(t[x], r.Elem()) + if v != nil { + arr = append(arr, v) + } else { + arr = append(arr, t[x]) + } + } + return arr + } + case map[string]interface{}: + switch r.Kind() { + case reflect.Struct: + sType := reflect.New(r).Type().Elem() + mMap := examineStruct(sType) + for k := range t { + v, ok := mMap[k] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s' not present in struct '%s'", k, sType.String())) + continue + } + val := fixWithType(t[k], v.Type) + if val != nil { + t[k] = val + } + } + return t + case reflect.Ptr: + sType := reflect.New(r).Type().Elem().Elem() + mMap := examineStruct(sType) + for k := range t { + v, ok := mMap[k] + if !ok { + zap.S().Debug(fmt.Sprintf("attribute '%s' not present in struct '%s'", k, sType.String())) + continue + } + val := fixWithType(t[k], v.Type) + if val != nil { + t[k] = val + } + } + return t + } + } + return nil +} + +func examineStruct(t reflect.Type) map[string]reflect.StructField { + if t.Kind() != reflect.Struct { + return nil + } + m := make(map[string]reflect.StructField) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + key := f.Name + // we want to get the tag name in the struct definition + // struct field name may be different than tag name + tag := f.Tag.Get("json") + if tag != "" && tag != "-" { + if i := strings.Index(tag, ","); i != -1 { + tag = tag[:strings.Index(tag, ",")] + } + key = tag + } + m[key] = f + } + return m +} diff --git a/pkg/iac-providers/cft/v1/sanitize-cft-template_test.go b/pkg/iac-providers/cft/v1/sanitize-cft-template_test.go new file mode 100644 index 000000000..366ee9681 --- /dev/null +++ b/pkg/iac-providers/cft/v1/sanitize-cft-template_test.go @@ -0,0 +1,431 @@ +/* + Copyright (C) 2020 Accurics, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cftv1 + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "reflect" + "testing" + + "github.com/awslabs/goformation/v4" +) + +func TestCFTV1_sanitizeCftTemplate(t *testing.T) { + type args struct { + isYAML bool + } + tests := []struct { + name string + inputFile string + args args + wantErr bool + }{ + { + name: "input file with incorrect values in parameters", + inputFile: filepath.Join("testdata", "incorrectTypesInParamsCftTemplate.yml"), + args: args{ + isYAML: true, + }, + wantErr: false, + }, + { + name: "input file with incorrect values in parameters", + inputFile: filepath.Join("testdata", "incorrectTypesInResourcesCftTemplate.yml"), + args: args{ + isYAML: true, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := &CFTV1{} + data, err := ioutil.ReadFile(tt.inputFile) + if err != nil { + t.Error(err) + } + + _, err = goformation.Open(tt.inputFile) + if err == nil { + t.Error("CFTV1.sanitizeCftTemplate() got no error, expected parsing error") + } + + got, err := a.sanitizeCftTemplate(data, tt.args.isYAML) + if (err != nil) != tt.wantErr { + t.Errorf("CFTV1.sanitizeCftTemplate() error = %v, wantErr %v", err, tt.wantErr) + return + } + + _, err = goformation.ParseJSON(got) + if err != nil { + t.Error("CFTV1.sanitizeCftTemplate() got error, expected no error") + } + }) + } +} + +func Test_fixWithType(t *testing.T) { + intVar := 1 + boolVar := true + floatVar := 1.0 + stringVar := "test" + stringSliceVar := []string{} + pointerToStringSliceVar := []*string{} + boolSliceVar := []bool{} + + type Address struct { + City string + PIN *string + } + + type Employee struct { + Name *string + Age float64 + Skills []string + Addr *Address `json:"emp_address"` + IsManager *bool + } + + type Department struct { + Name string + Count *float64 + Employees []Employee `json:"dept_employees"` + } + + dept := Department{} + + var invalidDeptData map[string]interface{} + var validDeptData map[string]interface{} + + invalidDeptDataStr := []byte(`{ + "Name": "Engineering", + "Count": "100", + "dept_employees": [ + { + "Name": "emp1", + "Age": 25, + "Skills": ["skill1", 2, 3], + "IsManager": "true", + "emp_address": { + "City": "Xandar", + "PIN": 111111 + } + }, + { + "Name": "emp2", + "Age": "35", + "Skills": ["skill1", "skill2", 3], + "IsManager": false, + "emp_address": { + "City": 123, + "PIN": "222222" + } + } + ], + "Rank": 1 + }`) + + err := json.Unmarshal(invalidDeptDataStr, &invalidDeptData) + if err != nil { + t.Error(err) + } + + validDeptDataStr := []byte(`{ + "Name": "Engineering", + "Count": 100, + "dept_employees": [ + { + "Name": "emp1", + "Age": 25, + "Skills": ["skill1", "2", "3"], + "IsManager": true, + "emp_address": { + "City": "Xandar", + "PIN": "111111" + } + }, + { + "Name": "emp2", + "Age": 35, + "Skills": ["skill1", "skill2", "3"], + "IsManager": false, + "emp_address": { + "City": "123", + "PIN": "222222" + } + } + ], + "Rank": 1 + }`) + + json.Unmarshal(validDeptDataStr, &validDeptData) + if err != nil { + t.Error(err) + } + + type args struct { + data interface{} + r reflect.Type + } + tests := 
[]struct { + name string + args args + want interface{} + }{ + { + name: "type of data matches expected type: int", + args: args{ + data: 13, + r: reflect.TypeOf(intVar), + }, + want: nil, + }, + { + name: "type of data matches expected type: string", + args: args{ + data: "1", + r: reflect.TypeOf(stringVar), + }, + want: nil, + }, + { + name: "type of data matches expected type: bool", + args: args{ + data: false, + r: reflect.TypeOf(boolVar), + }, + want: nil, + }, + { + name: "type of data matches expected type: float", + args: args{ + data: 1.0, + r: reflect.TypeOf(floatVar), + }, + want: nil, + }, + // want int against input data + { + name: "want int and original data is string, data can be converted to int", + args: args{ + data: "1", + r: reflect.TypeOf(intVar), + }, + want: int64(1), + }, + { + name: "want int and original data is float", + args: args{ + data: 2.0, + r: reflect.TypeOf(intVar), + }, + want: 2, + }, + { + name: "want int and original data is string, data cannot be converted to int", + args: args{ + data: "someValue", + r: reflect.TypeOf(intVar), + }, + // we don't modify the value if it can't be converted + want: nil, + }, + // want float against input data + { + name: "want float and original data is string, data can be converted to float", + args: args{ + data: "3.3", + r: reflect.TypeOf(floatVar), + }, + want: 3.3, + }, + { + name: "want float and original data is string, data cannot be converted to float", + args: args{ + data: "someStringValue", + r: reflect.TypeOf(floatVar), + }, + want: nil, + }, + { + name: "want float and original data is int", + args: args{ + data: 4, + r: reflect.TypeOf(floatVar), + }, + want: 4.0, + }, + // want string against input data + { + name: "want string and original data is int", + args: args{ + data: 4, + r: reflect.TypeOf(stringVar), + }, + want: "4", + }, + { + name: "want string and original data is float", + args: args{ + data: 3.141, + r: reflect.TypeOf(stringVar), + }, + want: "3.141", + }, + { + name: "want string and original data is boolean", + args: args{ + data: false, + r: reflect.TypeOf(stringVar), + }, + want: "false", + }, + // want bool against input data + { + name: "want bool and original data is string", + args: args{ + data: "false", + r: reflect.TypeOf(boolVar), + }, + want: false, + }, + { + name: "want bool and original data is int", + args: args{ + data: 3, + r: reflect.TypeOf(boolVar), + }, + want: nil, + }, + // tests for array and objects + { + name: "want array of string and input is array of integers", + args: args{ + data: []interface{}{1, 2, 3}, + r: reflect.TypeOf(stringSliceVar), + }, + want: []interface{}{"1", "2", "3"}, + }, + { + name: "want array of string and input is array of integers", + args: args{ + data: []interface{}{1, 2, 3}, + r: reflect.TypeOf(pointerToStringSliceVar), + }, + want: []interface{}{"1", "2", "3"}, + }, + { + name: "want array of bools and input is array of strings", + args: args{ + data: []interface{}{"false", "true"}, + r: reflect.TypeOf(boolSliceVar), + }, + want: []interface{}{false, true}, + }, + { + name: "input is map[string]interface{} with invalid data w.r.t struct fields", + args: args{ + data: invalidDeptData, + r: reflect.TypeOf(dept), + }, + want: validDeptData, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := fixWithType(tt.args.data, tt.args.r); !reflect.DeepEqual(got, tt.want) { + t.Errorf("fixWithType() = %+v, want %+v", got, tt.want) + } + }) + } +} + +func Test_examineStruct(t *testing.T) { + nonStructVar := 
"test" + + type structWithoutJSONTags struct { + One string + Two int + Three interface{} + } + + type structWithJSONTags struct { + One string `json:"one"` + Two int `json:"t,omitempty"` + Three interface{} `json:"third_tag,omitempty"` + Four float64 + } + + structVar1 := structWithoutJSONTags{} + structVar2 := structWithJSONTags{} + + type args struct { + t reflect.Type + } + tests := []struct { + name string + args args + want map[string]reflect.StructField + length int + wantKeys []string + }{ + { + name: "input type is not a struct", + args: args{ + t: reflect.TypeOf(nonStructVar), + }, + want: nil, + }, + { + name: "input type is a struct, struct fields don't have json tags", + args: args{ + t: reflect.TypeOf(structVar1), + }, + want: nil, + length: 3, + wantKeys: []string{"One", "Two", "Three"}, + }, + { + name: "input type is a struct, struct fields have json tags", + args: args{ + t: reflect.TypeOf(structVar2), + }, + want: nil, + length: 4, + wantKeys: []string{"one", "t", "third_tag", "Four"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := examineStruct(tt.args.t) + if got != nil { + if tt.length != len(got) { + t.Errorf("examineStruct() = returned map doesn't have correct length, expected %d, got %d", tt.length, len(got)) + } + + for _, key := range tt.wantKeys { + _, ok := got[key] + if !ok { + t.Errorf("examineStruct() = returned map doesn't have an expected key %s", key) + } + } + } + }) + } +} diff --git a/pkg/iac-providers/cft/v1/testdata/incorrectTypesInParamsCftTemplate.yml b/pkg/iac-providers/cft/v1/testdata/incorrectTypesInParamsCftTemplate.yml new file mode 100644 index 000000000..7c3f630b0 --- /dev/null +++ b/pkg/iac-providers/cft/v1/testdata/incorrectTypesInParamsCftTemplate.yml @@ -0,0 +1,361 @@ +AWSTemplateFormatVersion: "2010-09-09" +Metadata: + License: Apache-2.0 + AWS::CloudFormation::Interface: + ParameterGroups: + - Label: + default: "Database" + Parameters: + - DBName + - DBUser + - DBPassword + - DBRootPassword + - Label: + default: "Instance connection" + Parameters: + - InstanceType + - KeyName + - SSHLocation +Description: + "AWS CloudFormation Sample Template WordPress_Single_Instance: WordPress + is web software you can use to create a beautiful website or blog. This template + installs WordPress with a local MySQL database for storage. It demonstrates using + the AWS CloudFormation bootstrap scripts to deploy WordPress. **WARNING** This template + creates an Amazon EC2 instance. You will be billed for the AWS resources used if + you create a stack from this template." +Parameters: + DBName: + AllowedPattern: "[a-zA-Z][a-zA-Z0-9]*" + ConstraintDescription: + must begin with a letter and contain only alphanumeric + characters. + Default: wordpressdb + Description: The WordPress database name + MaxLength: "64" + MinLength: "1" + Type: String + DBPassword: + AllowedPattern: "[a-zA-Z0-9]+" + ConstraintDescription: must contain only alphanumeric characters. + Description: The WordPress database admin account password + MaxLength: "41" + MinLength: "8" + NoEcho: "true" + Type: String + DBRootPassword: + AllowedPattern: "[a-zA-Z0-9]+" + ConstraintDescription: must contain only alphanumeric characters. + Description: MySQL root password + MaxLength: "41" + MinLength: "8" + NoEcho: "true" + Type: String + DBUser: + AllowedPattern: "[a-zA-Z][a-zA-Z0-9]*" + ConstraintDescription: + must begin with a letter and contain only alphanumeric + characters. 
+ Description: The WordPress database admin account username + MaxLength: "16" + MinLength: "1" + Type: String + InstanceType: + AllowedValues: + - t2.nano + - t2.micro + - t2.small + - t2.medium + - t2.large + - m3.medium + - m3.large + - m3.xlarge + - m3.2xlarge + - m4.large + - m4.xlarge + - m4.2xlarge + - m4.4xlarge + - m4.10xlarge + - c3.large + ConstraintDescription: must be a valid EC2 instance type. + Default: t2.nano + Description: WebServer EC2 instance type + Type: String + KeyName: + ConstraintDescription: must be the name of an existing EC2 KeyPair. + Description: Name of an existing EC2 KeyPair to enable SSH access to the instances + Type: AWS::EC2::KeyPair::KeyName + SSHLocation: + AllowedPattern: (\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2}) + ConstraintDescription: must be a valid IP CIDR range of the form x.x.x.x/x. + Default: 0.0.0.0/0 + Description: The IP address range that can be used to SSH to the EC2 instances + MaxLength: "18" + MinLength: "9" + Type: String +Mappings: + AWSInstanceType2Arch: + c3.2xlarge: + Arch: HVM64 + c3.4xlarge: + Arch: HVM64 + c3.8xlarge: + Arch: HVM64 + c3.large: + Arch: HVM64 + c3.xlarge: + Arch: HVM64 + c4.2xlarge: + Arch: HVM64 + c4.4xlarge: + Arch: HVM64 + c4.8xlarge: + Arch: HVM64 + c4.large: + Arch: HVM64 + c4.xlarge: + Arch: HVM64 + cc2.8xlarge: + Arch: HVM64 + cr1.8xlarge: + Arch: HVM64 + d2.2xlarge: + Arch: HVM64 + d2.4xlarge: + Arch: HVM64 + d2.8xlarge: + Arch: HVM64 + d2.xlarge: + Arch: HVM64 + g2.2xlarge: + Arch: HVMG2 + g2.8xlarge: + Arch: HVMG2 + hi1.4xlarge: + Arch: HVM64 + hs1.8xlarge: + Arch: HVM64 + i2.2xlarge: + Arch: HVM64 + i2.4xlarge: + Arch: HVM64 + i2.8xlarge: + Arch: HVM64 + i2.xlarge: + Arch: HVM64 + m3.2xlarge: + Arch: HVM64 + m3.large: + Arch: HVM64 + m3.medium: + Arch: HVM64 + m3.xlarge: + Arch: HVM64 + m4.10xlarge: + Arch: HVM64 + m4.2xlarge: + Arch: HVM64 + m4.4xlarge: + Arch: HVM64 + m4.large: + Arch: HVM64 + m4.xlarge: + Arch: HVM64 + r3.2xlarge: + Arch: HVM64 + r3.4xlarge: + Arch: HVM64 + r3.8xlarge: + Arch: HVM64 + r3.large: + Arch: HVM64 + r3.xlarge: + Arch: HVM64 + t2.large: + Arch: HVM64 + t2.medium: + Arch: HVM64 + t2.micro: + Arch: HVM64 + t2.nano: + Arch: HVM64 + t2.small: + Arch: HVM64 + AWSRegionArch2AMI: + ap-northeast-1: + HVM64: ami-383c1956 + HVMG2: ami-08e5c166 + ap-northeast-2: + HVM64: ami-249b554a + HVMG2: NOT_SUPPORTED + ap-southeast-1: + HVM64: ami-c9b572aa + HVMG2: ami-5a15d239 + ap-southeast-2: + HVM64: ami-48d38c2b + HVMG2: ami-0c1a446f + cn-north-1: + HVM64: ami-43a36a2e + HVMG2: NOT_SUPPORTED + eu-central-1: + HVM64: ami-bc5b48d0 + HVMG2: ami-ba1a09d6 + eu-west-1: + HVM64: ami-bff32ccc + HVMG2: ami-83fd23f0 + sa-east-1: + HVM64: ami-6817af04 + HVMG2: NOT_SUPPORTED + us-east-1: + HVM64: ami-60b6c60a + HVMG2: ami-e998ea83 + us-west-1: + HVM64: ami-d5ea86b5 + HVMG2: ami-943956f4 + us-west-2: + HVM64: ami-f0091d91 + HVMG2: ami-315f4850 +Resources: + WebServer: + Type: AWS::EC2::Instance + CreationPolicy: + ResourceSignal: + Timeout: PT15M + Metadata: + AWS::CloudFormation::Init: + configSets: + wordpress_install: + - install_cfn + - install_wordpress + - configure_wordpress + configure_wordpress: + commands: + 01_set_mysql_root_password: + command: !Sub | + mysqladmin -u root password '${DBRootPassword}' + test: !Sub | + $(mysql ${DBName} -u root --password='${DBRootPassword}' >/dev/null 2>&1 /dev/null 2>&1 + + HTML Meta Tag + + +
Redirect to WordPress.
+ + + packages: + yum: + httpd24: [] + mysql: [] + mysql-devel: [] + mysql-libs: [] + mysql-server: [] + php73: [] + php73-mysqlnd: [] + services: + sysvinit: + httpd: + enabled: true + ensureRunning: true + mysqld: + enabled: true + ensureRunning: true + sources: + /var/www/html: http://wordpress.org/latest.tar.gz + Properties: + ImageId: + !FindInMap [ + AWSRegionArch2AMI, + !Ref "AWS::Region", + !FindInMap [AWSInstanceType2Arch, !Ref InstanceType, Arch], + ] + InstanceType: + Ref: InstanceType + KeyName: + Ref: KeyName + SecurityGroups: + - Ref: WebServerSecurityGroup + UserData: + Fn::Base64: !Sub | + #!/bin/bash -xe + yum update -y + yum update -y aws-cfn-bootstrap + /opt/aws/bin/cfn-init -v --stack ${AWS::StackId} --resource WebServer --configsets wordpress_install --region ${AWS::Region} + /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackId} --resource WebServer --region ${AWS::Region} + WebServerSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: "Enable HTTP access via port 80 locked down to the load balancer + SSH access" + SecurityGroupIngress: + - CidrIp: 0.0.0.0/0 + FromPort: "80" + IpProtocol: tcp + ToPort: "80" + - CidrIp: !Ref SSHLocation + FromPort: "22" + IpProtocol: tcp + ToPort: 22 +Outputs: + PublicIP: + Description: EC2 public IP + Value: !GetAtt WebServer.PublicIp + WebsiteURL: + Description: WordPress Website + Value: !Sub "http://${WebServer.PublicDnsName}/wordpress" diff --git a/pkg/iac-providers/cft/v1/testdata/incorrectTypesInResourcesCftTemplate.yml b/pkg/iac-providers/cft/v1/testdata/incorrectTypesInResourcesCftTemplate.yml new file mode 100644 index 000000000..311f4fd70 --- /dev/null +++ b/pkg/iac-providers/cft/v1/testdata/incorrectTypesInResourcesCftTemplate.yml @@ -0,0 +1,1427 @@ +AWSTemplateFormatVersion: "2010-09-09" +Description: >- + EKS for us-east-1 with Kubernetes Object deployment support. 
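Both new fixtures — the WordPress template above and the EKS template that continues below — deliberately carry scalars of the wrong type (quoted numbers such as `MaxLength: "64"` and `ToPort: "80"`, numeric tag values, booleans written as strings), so the raw files fail plain goformation parsing while the sanitized output parses cleanly. A minimal sketch of that flow, mirroring `TestCFTV1_sanitizeCftTemplate` above; the test name is illustrative:

```go
package cftv1

import (
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/awslabs/goformation/v4"
)

// Sketch: read a fixture with mismatched scalar types, sanitize it, and
// confirm goformation accepts the sanitized output (the raw fixture is
// expected to fail parsing, as asserted in TestCFTV1_sanitizeCftTemplate).
func TestSanitizeThenParse_Sketch(t *testing.T) {
	fixture := filepath.Join("testdata", "incorrectTypesInParamsCftTemplate.yml")

	data, err := ioutil.ReadFile(fixture)
	if err != nil {
		t.Fatal(err)
	}

	a := &CFTV1{}
	// second argument is true because the fixture is YAML, so intrinsic
	// functions are resolved with intrinsics.ProcessYAML
	sanitized, err := a.sanitizeCftTemplate(data, true)
	if err != nil {
		t.Fatalf("sanitize failed: %v", err)
	}

	// the sanitizer re-marshals the template as JSON, so ParseJSON is used here
	if _, err := goformation.ParseJSON(sanitized); err != nil {
		t.Fatalf("expected sanitized template to parse, got: %v", err)
	}
}
```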
+Resources: + ##### START VPC RESOURCES ##### + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: 10.0.0.0/16 + InstanceTenancy: default + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: BelongsTo + Value: !Ref "AWS::StackName" + - Key: Name + Value: GremlinGameDay/Gremlin/DefaultVpc + InternetGateway: + Type: AWS::EC2::InternetGateway + Properties: + Tags: + - Key: Name + Value: !Ref "AWS::StackName" + VPCGatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref "VPC" + InternetGatewayId: !Ref "InternetGateway" + PrivateSubnet1A: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.0.0/19 + AvailabilityZone: us-east-1a + Tags: + - Key: kubernetes.io/role/internal-elb + Value: 1 + PrivateSubnet2A: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.32.0/19 + AvailabilityZone: us-east-1b + Tags: + - Key: kubernetes.io/role/internal-elb + Value: 1 + PrivateSubnet3A: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.64.0/19 + AvailabilityZone: us-east-1c + Tags: + - Key: kubernetes.io/role/internal-elb + Value: 1 + PublicSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.96.0/19 + AvailabilityZone: us-east-1a + MapPublicIpOnLaunch: true + Tags: + - Key: kubernetes.io/role/elb + Value: 1 + PublicSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.128.0/19 + AvailabilityZone: us-east-1b + MapPublicIpOnLaunch: true + Tags: + - Key: kubernetes.io/role/elb + Value: 1 + PublicSubnet3: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref "VPC" + CidrBlock: 10.0.160.0/19 + AvailabilityZone: us-east-1c + MapPublicIpOnLaunch: true + Tags: + - Key: kubernetes.io/role/elb + Value: 1 + PrivateSubnet1ARouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref "VPC" + Tags: + - Key: Name + Value: Private subnet 1A + - Key: Network + Value: Private + PrivateSubnet1ARoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref "PrivateSubnet1ARouteTable" + DestinationCidrBlock: "0.0.0.0/0" + NatGatewayId: !Ref "NATGateway1" + PrivateSubnet1ARouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PrivateSubnet1A" + RouteTableId: !Ref "PrivateSubnet1ARouteTable" + PrivateSubnet2ARouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref "VPC" + Tags: + - Key: Name + Value: Private subnet 2A + - Key: Network + Value: Private + PrivateSubnet2ARoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref "PrivateSubnet2ARouteTable" + DestinationCidrBlock: "0.0.0.0/0" + NatGatewayId: !Ref "NATGateway2" + PrivateSubnet2ARouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PrivateSubnet2A" + RouteTableId: !Ref "PrivateSubnet2ARouteTable" + PrivateSubnet3ARouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref "VPC" + Tags: + - Key: Name + Value: Private subnet 3A + - Key: Network + Value: Private + PrivateSubnet3ARoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref "PrivateSubnet3ARouteTable" + DestinationCidrBlock: "0.0.0.0/0" + NatGatewayId: !Ref "NATGateway3" + PrivateSubnet3ARouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PrivateSubnet3A" + RouteTableId: !Ref "PrivateSubnet3ARouteTable" + PublicSubnetRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref "VPC" + Tags: + - Key: Name + Value: Public Subnets + - 
Key: Network + Value: Public + PublicSubnetRoute: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref "PublicSubnetRouteTable" + DestinationCidrBlock: "0.0.0.0/0" + GatewayId: !Ref "InternetGateway" + PublicSubnet1RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PublicSubnet1" + RouteTableId: !Ref "PublicSubnetRouteTable" + PublicSubnet2RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PublicSubnet2" + RouteTableId: !Ref "PublicSubnetRouteTable" + PublicSubnet3RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref "PublicSubnet3" + RouteTableId: !Ref "PublicSubnetRouteTable" + NAT1EIP: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::EIP + Properties: + Domain: vpc + NAT2EIP: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::EIP + Properties: + Domain: vpc + NAT3EIP: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::EIP + Properties: + Domain: vpc + NATGateway1: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt "NAT1EIP.AllocationId" + SubnetId: !Ref "PublicSubnet1" + NATGateway2: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt "NAT2EIP.AllocationId" + SubnetId: !Ref "PublicSubnet2" + NATGateway3: + DependsOn: VPCGatewayAttachment + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt "NAT3EIP.AllocationId" + SubnetId: !Ref "PublicSubnet3" + + ##### END VPC RESOURCES ##### + + ##### START SECURITY GROUPS ##### + ClusterControlPlaneSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Cluster communication + VpcId: !Ref "VPC" + + NodeSecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: Security group for all nodes in the cluster + Tags: + - Key: + Fn::Sub: + - kubernetes.io/cluster/${KubeName} + - KubeName: !GetAtt KubeCreate.Name + Value: owned + VpcId: !Ref "VPC" + + BastionSecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: Security group for bastion enabling SSH access with EC2 Instance Connect + SecurityGroupIngress: + - CidrIp: 0.0.0.0/0 + FromPort: 22 + IpProtocol: tcp + ToPort: 22 + VpcId: !Ref "VPC" + + NodeSecurityGroupIngress: + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow node to communicate with each other + FromPort: 0 + GroupId: !Ref NodeSecurityGroup + IpProtocol: "-1" + SourceSecurityGroupId: !Ref NodeSecurityGroup + ToPort: 65535 + + ClusterControlPlaneSecurityGroupIngress: + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow pods to communicate with the cluster API Server + FromPort: 443 + GroupId: !Ref ClusterControlPlaneSecurityGroup + IpProtocol: tcp + SourceSecurityGroupId: !Ref NodeSecurityGroup + ToPort: 443 + + NodeSecurityGroupFromControlPlaneIngress: + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow worker Kubelets and pods to receive communication from the cluster control plane + FromPort: 1025 + GroupId: !Ref NodeSecurityGroup + IpProtocol: tcp + SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup + ToPort: 65535 + + NodeSecurityGroupFromControlPlaneOn443Ingress: + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow pods running extension 
API servers on port 443 to receive communication from cluster control plane + FromPort: 443 + GroupId: !Ref NodeSecurityGroup + IpProtocol: tcp + SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup + ToPort: 443 + + ControlPlaneEgressToNodeSecurityGroup: + Type: "AWS::EC2::SecurityGroupEgress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow the cluster control plane to communicate with worker Kubelet and pods + DestinationSecurityGroupId: !Ref NodeSecurityGroup + FromPort: 1025 + GroupId: !Ref ClusterControlPlaneSecurityGroup + IpProtocol: tcp + ToPort: 65535 + + ControlPlaneEgressToNodeSecurityGroupOn443: + Type: "AWS::EC2::SecurityGroupEgress" + DependsOn: NodeSecurityGroup + Properties: + Description: Allow the cluster control plane to communicate with pods running extension API servers on port 443 + DestinationSecurityGroupId: !Ref NodeSecurityGroup + FromPort: 443 + GroupId: !Ref ClusterControlPlaneSecurityGroup + IpProtocol: tcp + ToPort: 443 + + ##### END SECURITY GROUPS ##### + + ##### START IAM ROLES ##### + + ControlPlaneProvisionRole: + Type: "AWS::IAM::Role" + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: sts:AssumeRole + - Effect: Allow + Principal: + AWS: !GetAtt BastionHostRole.Arn + Action: sts:AssumeRole + Policies: + - PolicyName: eksStackPolicy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - cloudformation:* + - eks:* + - ec2:DescribeSecurityGroups + - ec2:DescribeSubnets + - lambda:InvokeFunction + Resource: "*" + - Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ec2:CreateNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DeleteNetworkInterface + Resource: + - "*" + - Action: "kms:decrypt" + Effect: Allow + Resource: "*" + - Effect: Allow + Action: + - lambda:AddPermission + - lambda:RemovePermission + Resource: "*" + - Effect: Allow + Action: + - events:PutRule + - events:DeleteRule + - events:PutTargets + - events:RemoveTargets + Resource: "*" + + ControlPlaneRole: + Type: "AWS::IAM::Role" + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: eks.amazonaws.com + Action: sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - arn:aws:iam::aws:policy/AmazonEKSServicePolicy + + ControlPlanePassRole: + Type: "AWS::IAM::Policy" + Properties: + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: iam:PassRole + Resource: !GetAtt ControlPlaneRole.Arn + PolicyName: !Sub "${AWS::StackName}-ControlPlanePassRole" + Roles: [!Ref ControlPlaneProvisionRole] + + BastionHostRole: + Type: "AWS::IAM::Role" + Properties: + Path: / + AssumeRolePolicyDocument: + Statement: + - Action: + - "sts:AssumeRole" + Principal: + Service: + - ec2.amazonaws.com + Effect: Allow + Version: 2012-10-17 + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM" + Policies: + - PolicyName: "ec2-connect-policy" + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Action: + - "ec2:DescribeInstances" + - "ec2-instance-connect:SendSSHPublicKey" + - ec2:AssociateAddress + - ec2:DescribeAddresses + - cloudformation:* + - eks:* + - ec2:DescribeSecurityGroups + - ec2:DescribeSubnets + Resource: "*" + + BastionHostProfile: + DependsOn: BastionHostRole + Type: "AWS::IAM::InstanceProfile" + Properties: + Roles: + - 
!Ref BastionHostRole + Path: / + + CleanupLoadBalancersRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: ["sts:AssumeRole"] + Effect: Allow + Principal: + Service: [lambda.amazonaws.com] + Version: "2012-10-17" + Path: / + Policies: + - PolicyName: LambdaRole + PolicyDocument: + Version: "2012-10-17" + Statement: + - Action: + - "logs:CreateLogGroup" + - "logs:CreateLogStream" + - "logs:PutLogEvents" + Effect: Allow + Resource: "arn:aws:logs:*:*:*" + - Action: + - "elasticloadbalancing:DescribeLoadBalancers" + - "elasticloadbalancing:DescribeTags" + - "elasticloadbalancing:DeleteLoadBalancer" + - "ec2:DescribeTags" + - "ec2:DeleteSecurityGroup" + - "ec2:DescribeNetworkInterfaces" + - "ec2:DescribeSecurityGroups" + - "ec2:RevokeSecurityGroupEgress" + - "ec2:RevokeSecurityGroupIngress" + Effect: Allow + Resource: "*" + + ##### END IAM ROLES ##### + + ##### START EKS RESOURCES ##### + + BastionHost: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-0b69ea66ff7391e80 + IamInstanceProfile: !Ref BastionHostProfile + InstanceType: t2.micro + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 10 + VolumeType: gp2 + DeleteOnTermination: true + SecurityGroupIds: + - Ref: BastionSecurityGroup + SubnetId: !Ref PublicSubnet1 + Tags: + - Key: Name + Value: !Sub "${AWS::StackName}-bastion" + UserData: + Fn::Base64: + Fn::Sub: + - | + #!/bin/bash -xe + exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + + yum update -y && yum install -y unzip make wget tar gzip python3 git + + curl -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/kubectl \ + && chmod +x ./kubectl \ + && cp ./kubectl /usr/local/bin/kubectl + + wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 \ + && mv jq-linux64 /usr/local/bin/jq \ + && chmod +x /usr/local/bin/jq + + curl -o helm.tar.gz https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz \ + && tar -zxvf helm.tar.gz \ + && mv linux-amd64/helm /usr/local/bin/helm \ + && chmod +x /usr/local/bin/helm + + curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/aws-iam-authenticator \ + && mv aws-iam-authenticator /usr/local/bin/aws-iam-authenticator \ + && chmod +x /usr/local/bin/aws-iam-authenticator + + su ec2-user -c 'aws eks update-kubeconfig --name ${KubeName} --role-arn ${ControlRole} --region ${AWS::Region}' + + cat <> /home/ec2-user/.bashrc + export K8S_ROLE_ARN=${ControlRole} + export K8S_CLUSTER_NAME=${KubeName} + export K8S_CA_DATA=${CAData} + export K8S_ENDPOINT=${Endpoint} + export PATH=/usr/local/bin:$PATH + EOF + + /opt/aws/bin/cfn-signal --exit-code $? 
\ + --stack ${AWS::StackName} \ + --resource BastionHost \ + --region ${AWS::Region} + - KubeName: !GetAtt KubeCreate.Name + CAData: !GetAtt KubeCreate.CertificateAuthorityData + Endpoint: !GetAtt KubeCreate.Endpoint + ControlRole: !GetAtt ControlPlaneProvisionRole.Arn + + NodeInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: "/" + Roles: + - !Ref NodeInstanceRole + + NodeInstanceRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + - ec2.amazonaws.com + Action: + - sts:AssumeRole + Path: "/" + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + - arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy + Policies: + - PolicyName: clusterAutoScalingPolicy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "autoscaling:DescribeAutoScalingGroups" + - "autoscaling:DescribeAutoScalingInstances" + - "autoscaling:DescribeLaunchConfigurations" + - "autoscaling:DescribeTags" + - "autoscaling:SetDesiredCapacity" + - "autoscaling:TerminateInstanceInAutoScalingGroup" + - "ec2:DescribeLaunchTemplateVersions" + Resource: "*" + NodeGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + DesiredCapacity: "3" + LaunchTemplate: + LaunchTemplateName: !Sub "${AWS::StackName}" + Version: !GetAtt "NodeGroupLaunchTemplate.LatestVersionNumber" + MaxSize: "10" + MinSize: "3" + Tags: + - Key: Name + PropagateAtLaunch: "true" + Value: !Sub "${AWS::StackName}-ng" + - Key: + Fn::Sub: + - kubernetes.io/cluster/${KubeName} + - KubeName: !GetAtt KubeCreate.Name + PropagateAtLaunch: "true" + Value: owned + - Key: + Fn::Sub: + - k8s.io/cluster-autoscaler/${KubeName} + - KubeName: !GetAtt KubeCreate.Name + PropagateAtLaunch: "true" + Value: owned + - Key: k8s.io/cluster-autoscaler/enabled + PropagateAtLaunch: "true" + Value: true + VPCZoneIdentifier: + - !Ref PrivateSubnet1A + - !Ref PrivateSubnet2A + - !Ref PrivateSubnet3A + UpdatePolicy: + AutoScalingRollingUpdate: + MaxBatchSize: "1" + MinInstancesInService: "0" + + NodeGroupLaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Properties: + LaunchTemplateData: + IamInstanceProfile: + Arn: !GetAtt "NodeInstanceProfile.Arn" + ImageId: ami-0062c1c8b255c0bb6 + InstanceType: m5.large + NetworkInterfaces: + - AssociatePublicIpAddress: true + DeviceIndex: 0 + Groups: + - !Ref NodeSecurityGroup + - !Ref ClusterControlPlaneSecurityGroup + UserData: + Fn::Base64: + Fn::Sub: + - | + #!/bin/bash + set -o xtrace + /etc/eks/bootstrap.sh ${KubeName} + /opt/aws/bin/cfn-signal --exit-code $? 
\ + --stack ${AWS::StackName} \ + --resource NodeGroup \ + --region ${AWS::Region} + - KubeName: !GetAtt KubeCreate.Name + LaunchTemplateName: !Sub "${AWS::StackName}" + + ##### START CUSTOM RESOURCES ##### + + KubeCreateLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import botocore.session + import logging + import subprocess + import os + import json + import logging + from crhelper import CfnResource + + logger = logging.getLogger() + logger.setLevel(logging.INFO) + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + + outdir = os.environ.get('TEST_OUTDIR', '/tmp') + kubeconfig = os.path.join(outdir, 'kubeconfig') + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG', polling_interval=15) + + def lambda_handler(event, context): + helper(event, context) + + @helper.poll_delete + def delete(event, context): + # delete is a special case + session = botocore.session.get_session() + eks = session.create_client('eks') + physical_id = event.get('PhysicalResourceId', None) + + cluster_name = '' + if physical_id: + cluster_name = physical_id + else: + raise Exception( + "unexpected error. cannot determine cluster name") + + logger.info("request to delete: %s" % cluster_name) + logger.info('deleting cluster') + + eks.delete_cluster(name=cluster_name) + logger.info('waiting for cluster to be deleted...') + waiter = eks.get_waiter('cluster_deleted') + waiter.wait(name=cluster_name, WaiterConfig={ + 'Delay': 30, + 'MaxAttempts': 28 + }) + return + + @helper.poll_create + @helper.poll_update + def poll_create_update(event, context): + try: + logger.info(json.dumps(event)) + + request_id = event['RequestId'] # used to generate cluster name + request_type = event['RequestType'] + props = event['ResourceProperties'] + old_props = event.get('OldResourceProperties', {}) + config = props['Config'] + + logger.info(json.dumps(config)) + + session = botocore.session.get_session() + eks = session.create_client('eks') + + cluster_name = f"{config.get('name', 'EKS')}{request_id}" + config['name'] = cluster_name + logger.info("request: %s" % config) + + if request_type == 'Create': + logger.info("creating cluster %s" % cluster_name) + try: + resp = eks.create_cluster(**config) + logger.info("create response: %s" % resp) + except Exception as e: + logger.error('Failed at creating cluster, moving on...') + logger.error(e) + elif request_type == 'Update': + logger.info("updating cluster %s" % cluster_name) + resp = eks.update_cluster_config(**config) + logger.info("update response: %s" % resp) + else: + raise Exception("Invalid request type %s" % request_type) + + # wait for the cluster to become active (14min timeout) + logger.info('waiting for cluster to become active...') + waiter = eks.get_waiter('cluster_active') + waiter.wait(name=cluster_name, WaiterConfig={ + 'Delay': 30, + 'MaxAttempts': 28 + }) + + resp = eks.describe_cluster(name=cluster_name) + logger.info("describe response: %s" % resp) + attrs = { + 'Name': cluster_name, + 'Endpoint': resp['cluster']['endpoint'], + 'Arn': resp['cluster']['arn'], + 'CertificateAuthorityData': resp['cluster']['certificateAuthority']['data'] + } + logger.info("attributes: %s" % attrs) + helper.Data['Name'] = cluster_name 
+ helper.Data['Endpoint'] = resp['cluster']['endpoint'] + helper.Data['Arn'] = resp['cluster']['arn'] + helper.Data['CertificateAuthorityData'] = resp['cluster']['certificateAuthority']['data'] + return cluster_name + except botocore.exceptions.WaiterError as e: + logger.exception(e) + return None + except KeyError as e: + logger.exception(e) + raise Exception("invalid request. Missing '%s'" % str(e)) + except Exception as e: + logger.exception(e) + raise Exception(e.output) + + KubeCreate: + Type: "Custom::KubeCreate" + Version: "1.0" + Properties: + ServiceToken: !GetAtt KubeCreateLambda.Arn + Config: + version: "1.15" + roleArn: !GetAtt ControlPlaneRole.Arn + name: GremlinGameDay + resourcesVpcConfig: + securityGroupIds: + - !GetAtt ClusterControlPlaneSecurityGroup.GroupId + subnetIds: + - !Ref PrivateSubnet1A + - !Ref PrivateSubnet2A + - !Ref PrivateSubnet3A + - !Ref PublicSubnet1 + - !Ref PublicSubnet2 + - !Ref PublicSubnet3 + + KubeNodeJoinLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import json + import logging + import subprocess + import os + import time + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + outdir = '/tmp' + manifest_path = '/tmp' + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + + cluster_name = os.environ.get('CLUSTER_NAME', None) + kubeconfig = os.path.join(outdir, 'kubeconfig') + + def create_kubeconfig(): + subprocess.check_call(['aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + @helper.create + @helper.update + def create_handler(event, _): + print('Received event: %s' % json.dumps(event)) + request_type = event['RequestType'] + manifest_text = event['ResourceProperties']['Manifest'] + + manifest_list = json.loads(manifest_text) + manifest_file = os.path.join(outdir, 'manifest.yaml') + with open(manifest_file, "w") as f: + f.writelines(map(lambda obj: json.dumps(obj), manifest_list)) + + logger.info("manifest written to: %s" % manifest_file) + + create_kubeconfig() + keep_going = True + retries = 0 + max_tries = 20 + while keep_going: + try: + kubectl('apply', manifest_file) + keep_going = False + except Exception as e: + print(e) + if max_tries > retries: + retries += 1 + time.sleep(30) + continue + else: + raise Exception(e.output) + return True + + def kubectl(verb, file): + try: + cmnd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + output = subprocess.check_output(cmnd, stderr=subprocess.STDOUT) + return output + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(output) + + def lambda_handler(event, context): + helper(event, context) + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + + KubeNodeJoin: + DependsOn: [NodeInstanceRole, KubeCreate, NodeGroup] + Type: Custom::KubeNodeJoin + Properties: + ServiceToken: !GetAtt KubeNodeJoinLambda.Arn + Manifest: + Fn::Join: + - "" + - - '[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"aws-auth","namespace":"kube-system"},"data":{"mapRoles":"[{\"rolearn\":\"' + - Fn::GetAtt: + - NodeInstanceRole + - Arn + - 
\",\"username\":\"system:node:{{EC2PrivateDNSName}}\",\"groups\":[\"system:bootstrappers\",\"system:nodes\"]}]","mapUsers":"[]","mapAccounts":"[]"}}] + + KubeSetupClusterLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import json + import logging + import subprocess + import os + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + outdir = '/tmp' + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:/opt/helm:' + os.environ['PATH'] + + container_insights_url = 'https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/master/k8s-yaml-templates/quickstart/cwagent-fluentd-quickstart.yaml' + cluster_autoscale_url = 'https://gist.githubusercontent.com/allenmichael/8e2db8b62bdd4d9ec1d1ca3963f71644/raw/afc7a63fccb308c908783b4e34caf9e352f524f3/cluster-autoscaler.yaml' + cluster_name = os.environ.get('CLUSTER_NAME', None) + region = os.environ.get('REGION', None) + kubeconfig = os.path.join(outdir, 'kubeconfig') + + def create_kubeconfig(): + subprocess.check_call(['aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + @helper.delete + def delete(event, _): + try: + print('Received event: %s' % json.dumps(event)) + + create_kubeconfig() + + setup_cluster('insights', 'delete') + setup_cluster('autoscale', 'delete') + except subprocess.CalledProcessError as exc: + logger.info(exc.output) + else: + logger.info('passed creating and deleting manifests') + return + + @helper.create + def create_handler(event, _): + try: + print('Received event: %s' % json.dumps(event)) + + create_kubeconfig() + + setup_cluster('insights', 'apply') + setup_cluster('autoscale', 'apply') + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info('passed creating and deleting manifests') + return True + + def setup_cluster(type, verb): + try: + replace_text = [] + cmnd = [] + if type == 'insights': + cmnd = ['curl', container_insights_url] + replace_text = ['sed', 's/{{cluster_name}}/'+cluster_name+'/;s/{{region_name}}/'+region+'/'] + elif type == 'autoscale': + cmnd = ['curl', cluster_autoscale_url] + replace_text = ['sed', 's/{{cluster_name}}/'+cluster_name+'/'] + ps = subprocess.Popen(cmnd, stdout=subprocess.PIPE) + sed_output = subprocess.Popen(replace_text, stdin=ps.stdout, stdout=subprocess.PIPE) + kube_cmnd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', '-'] + kube_output = subprocess.check_output(kube_cmnd, stdin=sed_output.stdout, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(kube_output) + + def lambda_handler(event, context): + helper(event, context) + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + REGION: !Ref AWS::Region + + KubeSetupCluster: + DependsOn: [KubeCreate, KubeNodeJoin] + Type: Custom::KubeSetupCluster + Properties: + ServiceToken: !GetAtt KubeSetupClusterLambda.Arn + + KubeApplyManifestsLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - 
arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import json + import logging + import subprocess + import os + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + outdir = '/tmp' + manifest_path = '/tmp' + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:/opt/helm:' + os.environ['PATH'] + + cluster_name = os.environ.get('CLUSTER_NAME', None) + kubeconfig = os.path.join(outdir, 'kubeconfig') + + def create_kubeconfig(): + subprocess.check_call(['aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + @helper.delete + def delete(event, _): + try: + print('Received event: %s' % json.dumps(event)) + + manifests = event['ResourceProperties']['Manifests'] + create_kubeconfig() + for i, manifest_text in enumerate(manifests): + manifest_list = json.loads(manifest_text) + manifest_file = os.path.join(outdir, f'manifest-{i}.yaml') + with open(manifest_file, "w") as f: + f.writelines(map(lambda obj: json.dumps(obj), manifest_list)) + + logger.info("manifest written to: %s" % manifest_file) + kubectl('delete', manifest_file) + except subprocess.CalledProcessError as exc: + logger.info(exc.output) + else: + logger.info('passed creating and deleting manifests') + return + + @helper.create + def create_handler(event, _): + try: + print('Received event: %s' % json.dumps(event)) + + manifests = event['ResourceProperties']['Manifests'] + create_kubeconfig() + for i, manifest_text in enumerate(manifests): + manifest_list = json.loads(manifest_text) + manifest_file = os.path.join(outdir, f'manifest-{i}.yaml') + with open(manifest_file, "w") as f: + f.writelines(map(lambda obj: json.dumps(obj), manifest_list)) + + logger.info("manifest written to: %s" % manifest_file) + kubectl('apply', manifest_file) + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info('passed creating and deleting manifests') + return True + + def kubectl(verb, file): + try: + cmnd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + output = subprocess.check_output(cmnd, stderr=subprocess.STDOUT) + return output + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(output) + + def lambda_handler(event, context): + helper(event, context) + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + + KubeApplyManifests: + DependsOn: [KubeCreate, KubeNodeJoin, KubeApply] + Type: Custom::KubeApplyManifests + Properties: + ServiceToken: !GetAtt KubeApplyManifestsLambda.Arn + Manifests: + - '[{"apiVersion": "v1","kind": "ServiceAccount","metadata":{"name": "tiller","namespace": "kube-system"}}]' + - '[{"apiVersion": "rbac.authorization.k8s.io/v1beta1","kind": "ClusterRoleBinding","metadata":{"name": "tiller"},"roleRef":{"apiGroup": "rbac.authorization.k8s.io","kind": "ClusterRole", "name": "cluster-admin"}, "subjects":[{"kind": "ServiceAccount","name": "tiller","namespace": "kube-system"}]}]' + - '[{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"gremlin"}}]' + + KubeApplyLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import json + 
import logging + import subprocess + import os + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + outdir = '/tmp' + manifest_path = '/tmp' + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + + cluster_name = os.environ.get('CLUSTER_NAME', None) + kubeconfig = os.path.join(outdir, 'kubeconfig') + + def create_kubeconfig(): + subprocess.check_call(['aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + def get_config_details(event): + urls = event['ResourceProperties']['Urls'] + return urls + + @helper.delete + def delete_hanlder(event, _): + urls = get_config_details(event) + create_kubeconfig() + for u in urls: + kubectl('delete', u) + return + + @helper.create + @helper.update + def create_handler(event, _): + print('Received event: %s' % json.dumps(event)) + urls = get_config_details(event) + create_kubeconfig() + for u in urls: + kubectl('apply', u) + return True + + + def kubectl(verb, file): + try: + cmnd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + output = subprocess.check_output(cmnd, stderr=subprocess.STDOUT) + return output + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(output) + + + def lambda_handler(event, context): + helper(event, context) + + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + + KubeApply: + DependsOn: [KubeCreate, KubeNodeJoin] + Type: "Custom::KubeApply" + Version: "1.0" + Properties: + ServiceToken: !GetAtt KubeApplyLambda.Arn + Urls: + - "https://gist.githubusercontent.com/allenmichael/c7c15d97a4234c1cf70c791d9ed3a5f9/raw/c56bad1dc709ade416840f0ddbde66306ed6fe61/sock-shop.yaml" + + RetrievePublicEndpointsLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt ControlPlaneProvisionRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import json + import logging + import subprocess + import os + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + outdir = '/tmp' + + os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + + cluster_name = os.environ.get('CLUSTER_NAME', None) + kubeconfig = os.path.join(outdir, 'kubeconfig') + + def create_kubeconfig(): + subprocess.check_call(['aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + @helper.delete + def delete_handler(event, _): + return + + @helper.create + def create_handler(event, _): + try: + print('Received event: %s' % json.dumps(event)) + create_kubeconfig() + frontendCmd = ['kubectl', 'get','svc/front-end', '-n', 'sock-shop', '-o', 'jsonpath="{.status.loadBalancer.ingress[0].hostname}"', '--kubeconfig', kubeconfig] + frontendOutput = subprocess.check_output(frontendCmd, stderr=subprocess.STDOUT).decode("utf-8") + helper.Data['FrontendEndpoint'] = f'http://{frontendOutput[1:-1]}' + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(frontendOutput) + return True + + def lambda_handler(event, context): + helper(event, context) + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + RetrievePublicEndpoints: + DependsOn: [KubeApply, KubeApplyManifests] + Type: 
"Custom::RetrievePublicEndpoints" + Version: "1.0" + Properties: + ServiceToken: !GetAtt RetrievePublicEndpointsLambda.Arn + + CleanELBsLambda: + Type: AWS::Lambda::Function + Properties: + Handler: index.lambda_handler + MemorySize: 1024 + Role: !GetAtt CleanupLoadBalancersRole.Arn + Runtime: python3.7 + Timeout: 900 + Layers: + - arn:aws:lambda:us-east-1:812570870442:layer:k8sfull:1 + - arn:aws:lambda:us-east-1:812570870442:layer:crhelper:1 + Code: + ZipFile: | + import boto3 + import logging + import os + from crhelper import CfnResource + + logger = logging.getLogger(__name__) + helper = CfnResource(json_logging=True, log_level='DEBUG') + + cluster_name = os.environ.get('CLUSTER_NAME', None) + + def delete_dependencies(sg_id, c): + filters = [{'Name': 'ip-permission.group-id', 'Values': [sg_id]}] + for sg in c.describe_security_groups(Filters=filters)['SecurityGroups']: + for p in sg['IpPermissions']: + if 'UserIdGroupPairs' in p.keys(): + if sg_id in [x['GroupId'] for x in p['UserIdGroupPairs']]: + try: + c.revoke_security_group_ingress(GroupId=sg['GroupId'], IpPermissions=[p]) + except Exception as e: + logger.error("ERROR: %s %s" % (sg['GroupId'], str(e))) + filters = [{'Name': 'egress.ip-permission.group-id', 'Values': [sg_id]}] + for sg in c.describe_security_groups(Filters=filters)['SecurityGroups']: + for p in sg['IpPermissionsEgress']: + if 'UserIdGroupPairs' in p.keys(): + if sg_id in [x['GroupId'] for x in p['UserIdGroupPairs']]: + try: + c.revoke_security_group_egress(GroupId=sg['GroupId'], IpPermissions=[p]) + except Exception as e: + logger.error("ERROR: %s %s" % (sg['GroupId'], str(e))) + filters = [{'Name': 'group-id', 'Values': [sg_id]}] + for eni in c.describe_network_interfaces(Filters=filters)['NetworkInterfaces']: + try: + c.delete_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId']) + except Exception as e: + logger.error("ERROR: %s %s" % (eni['NetworkInterfaceId'], str(e))) + + + @helper.delete + def delete_handler(event, _): + tag_key = f"kubernetes.io/cluster/{cluster_name}" + lb_types = [ + ["elb", "LoadBalancerName", "LoadBalancerNames", "LoadBalancerDescriptions", "LoadBalancerName"], + ["elbv2", "LoadBalancerArn", "ResourceArns", "LoadBalancers", "ResourceArn"] + ] + for lt in lb_types: + elb = boto3.client(lt[0]) + lbs = [] + response = elb.describe_load_balancers() + while True: + lbs += [l[lt[1]] for l in response[lt[3]]] + if "NextMarker" in response.keys(): + response = elb.describe_load_balancers(Marker=response["NextMarker"]) + else: + break + lbs_to_remove = [] + if lbs: + lbs = elb.describe_tags(**{lt[2]: lbs})["TagDescriptions"] + for tags in lbs: + for tag in tags['Tags']: + if tag["Key"] == tag_key and tag['Value'] == "owned": + lbs_to_remove.append(tags[lt[4]]) + if lbs_to_remove: + for lb in lbs_to_remove: + print("removing elb %s" % lb) + elb.delete_load_balancer(**{lt[1]: lb}) + ec2 = boto3.client('ec2') + response = ec2.describe_tags(Filters=[ + {'Name': 'tag:%s' % tag_key, 'Values': ['owned']}, + {'Name': 'resource-type', 'Values': ['security-group']} + ]) + for t in [r['ResourceId'] for r in response['Tags']]: + try: + ec2.delete_security_group(GroupId=t) + except ec2.exceptions.ClientError as e: + if 'DependencyViolation' in str(e): + print("Dependency error on %s" % t) + delete_dependencies(t, ec2) + else: + raise + + def lambda_handler(event, context): + helper(event, context) + Environment: + Variables: + CLUSTER_NAME: !GetAtt KubeCreate.Name + CleanELBs: + Type: "Custom::CleanELBs" + Version: "1.0" + Properties: + 
ServiceToken: !GetAtt CleanELBsLambda.Arn + ##### END CUSTOM RESOURCES ##### +Outputs: + BastionHost: + Description: Bastion Host Instance ID + Value: !Ref BastionHost + FrontendEndpoint: + Description: Public frontend endpoint in EKS for the Sock Shop + Value: !GetAtt RetrievePublicEndpoints.FrontendEndpoint + EKSClusterName: + Description: EKS Cluster Name + Value: !GetAtt KubeCreate.Name + NAT1EIP: + Description: NAT 1 IP address + Value: !Ref "NAT1EIP" + Export: + Name: !Sub "${AWS::StackName}-NAT1EIP" + NAT2EIP: + Description: NAT 2 IP address + Value: !Ref "NAT2EIP" + Export: + Name: !Sub "${AWS::StackName}-NAT2EIP" + NAT3EIP: + Description: NAT 3 IP address + Value: !Ref "NAT3EIP" + Export: + Name: !Sub "${AWS::StackName}-NAT3EIP" + PrivateSubnet1AID: + Description: Private subnet 1A ID in Availability Zone 1 + Value: !Ref "PrivateSubnet1A" + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet1AID" + PrivateSubnet2AID: + Description: Private subnet 2A ID in Availability Zone 2 + Value: !Ref "PrivateSubnet2A" + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet2AID" + PrivateSubnet3AID: + Description: Private subnet 3A ID in Availability Zone 3 + Value: !Ref "PrivateSubnet3A" + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet3AID" + PublicSubnet1ID: + Description: Public subnet 1 ID in Availability Zone 1 + Value: !Ref "PublicSubnet1" + Export: + Name: !Sub "${AWS::StackName}-PublicSubnet1ID" + PublicSubnet2ID: + Description: Public subnet 2 ID in Availability Zone 2 + Value: !Ref "PublicSubnet2" + Export: + Name: !Sub "${AWS::StackName}-PublicSubnet2ID" + PublicSubnet3ID: + Description: Public subnet 3 ID in Availability Zone 3 + Value: !Ref "PublicSubnet3" + Export: + Name: !Sub "${AWS::StackName}-PublicSubnet3ID" + PrivateSubnet1ARouteTable: + Value: !Ref "PrivateSubnet1ARouteTable" + Description: Private subnet 1A route table + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet1ARouteTable" + PrivateSubnet2ARouteTable: + Value: !Ref "PrivateSubnet2ARouteTable" + Description: Private subnet 2A route table + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet2ARouteTable" + PrivateSubnet3ARouteTable: + Value: !Ref "PrivateSubnet3ARouteTable" + Description: Private subnet 3A route table + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnet3ARouteTable" + PublicSubnetRouteTable: + Value: !Ref "PublicSubnetRouteTable" + Description: Public subnet route table + Export: + Name: !Sub "${AWS::StackName}-PublicSubnetRouteTable" + VPCID: + Value: !Ref "VPC" + Description: VPC ID + Export: + Name: !Sub "${AWS::StackName}-VPCID"