From c29d74f52ed336b7f9414c1e55d057768674f17f Mon Sep 17 00:00:00 2001 From: exequielrafaela Date: Fri, 1 Jul 2022 12:27:25 -0300 Subject: [PATCH 1/6] Updating apps-devstg/us-east-1/k8s-eks-v117 README.md related layers --- apps-devstg/us-east-1/k8s-eks-demoapps/README.md | 4 ++++ apps-devstg/us-east-1/k8s-eks-v1.17/README.md | 16 ++++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 apps-devstg/us-east-1/k8s-eks-demoapps/README.md diff --git a/apps-devstg/us-east-1/k8s-eks-demoapps/README.md b/apps-devstg/us-east-1/k8s-eks-demoapps/README.md new file mode 100644 index 000000000..21dabe648 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks-demoapps/README.md @@ -0,0 +1,4 @@ +# AWS EKS Reference Layer (module: terraform-aws-eks v1.17x) + +follow [README.md](../k8s-eks-v1.17/README.md) + diff --git a/apps-devstg/us-east-1/k8s-eks-v1.17/README.md b/apps-devstg/us-east-1/k8s-eks-v1.17/README.md index c318e94b3..801ae8507 100644 --- a/apps-devstg/us-east-1/k8s-eks-v1.17/README.md +++ b/apps-devstg/us-east-1/k8s-eks-v1.17/README.md @@ -81,7 +81,7 @@ kind: Config clusters: - cluster: - server: https://9E9E4EC03A0E83CF00A9A02F8EFC1F00.gr7.us-east-1.eks.amazonaws.com + server: https://9E9E4XXXXXXXXXXXXXXXEFC1F00.gr7.us-east-1.eks.amazonaws.com certificate-authority-data: LS0t...S0tLQo= name: eks_bb-apps-devstg-eks-demoapps @@ -114,12 +114,6 @@ users: ``` -3. Identities layers - 1. The main files begin with the `ids_` prefix. - 1. They declare roles and their respective policies. - 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. - 2. Go to this layer and run `leverage tf apply` - #### Setup auth and test cluster connectivity 1. Connecting to the K8s EKS cluster 2. Since we’re deploying a private K8s cluster you’ll need to be **connected to the VPN** @@ -131,12 +125,18 @@ users: 1. https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html 5. Export AWS credentials 1. 
`export AWS_SHARED_CREDENTIALS_FILE="~/.aws/bb/credentials"` - 2. `export AWS_CONFIG_FILE="/.aws/bb/config"` + 2. `export AWS_CONFIG_FILE="~/.aws/bb/config"` 6. `k8s-eks-v1.17/cluster` layer should generate the `kubeconfig` file in the output of the apply, or by running `leverage tf output` similar to https://github.com/binbashar/le-devops-workflows/blob/master/README.md#eks-clusters-kubeconfig-file 1. Edit that file to replace $HOME with the path to your home dir 2. Place the kubeconfig in `~/.kube/bb/apps-devstg` and then use export `KUBECONFIG=~/.kube/bb/apps-devstg` to help tools like kubectl find a way to talk to the cluster (or `KUBECONFIG=~/.kube/bb/apps-devstg get pods --all-namespaces` ) 3. You should be now able to run kubectl commands (https://kubernetes.io/docs/reference/kubectl/cheatsheet/) +3. Identities layers + 1. The main files begin with the `ids_` prefix. + 1. They declare roles and their respective policies. + 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. + 2. Go to this layer and run `leverage tf apply` + ### K8s EKS Cluster Components and Workloads deployment 1. 
Cluster Components (k8s-resources) From 128df7fe91cfcc0ef02346443e0aea85dfd139f1 Mon Sep 17 00:00:00 2001 From: exequielrafaela Date: Fri, 1 Jul 2022 12:28:22 -0300 Subject: [PATCH 2/6] Creating apps-devstg/us-east-1/k8s-eks/cluster layer --- .../k8s-eks/cluster/.terraform.lock.hcl | 81 ++++++++ .../k8s-eks/cluster/common-variables.tf | 159 ++++++++++++++++ .../us-east-1/k8s-eks/cluster/config.tf | 70 +++++++ .../k8s-eks/cluster/eks-managed-nodes.tf | 174 ++++++++++++++++++ .../us-east-1/k8s-eks/cluster/locals.tf | 62 +++++++ .../us-east-1/k8s-eks/cluster/outputs.tf | 42 +++++ .../us-east-1/k8s-eks/cluster/variables.tf | 40 ++++ 7 files changed, 628 insertions(+) create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/.terraform.lock.hcl create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/config.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/locals.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/outputs.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/cluster/variables.tf diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/.terraform.lock.hcl b/apps-devstg/us-east-1/k8s-eks/cluster/.terraform.lock.hcl new file mode 100644 index 000000000..cd310b390 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/.terraform.lock.hcl @@ -0,0 +1,81 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "4.20.1" + constraints = ">= 3.72.0, ~> 4.10" + hashes = [ + "h1:HHfwMYY0FDtMzaGgITqsPIBlUWnQNZ5+bTF1dyscsnw=", + "zh:21d064d8fac08376c633e002e2f36e83eb7958535e251831feaf38f51c49dafd", + "zh:3a37912ff43d89ce8d559ec86265d7506801bccb380c7cfb896e8ff24e3fe79d", + "zh:795eb175c85279ec51dbe12e4d1afa0860c2c0b22e5d36a8e8869f60a93b7931", + "zh:8afb61a18b17f8ff249cb23e9d3b5d2530944001ef1d56c1d53f41b0890c7ab8", + "zh:911701040395e0e4da4b7252279e7cf1593cdd26f22835e1a9eddbdb9691a1a7", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a46d54a6a5407f569f8178e916af888b2b268f86448c64cad165dc89759c8399", + "zh:c5f71fd5e3519a24fd6af455ef1c26a559cfdde7f626b0afbd2a73bb79f036b1", + "zh:df3b69d6c9b0cdc7e3f90ee08412b22332c32e97ad8ce6ccad528f89f235a7d3", + "zh:e99d6a64c03549d60c2accf792fa04466cfb317f72e895c8f67eff8a02920887", + "zh:eea7a0df8bcb69925c9ce8e15ef403c8bbf16d46c43e8f5607b116531d1bce4a", + "zh:f6a26ce77f7db1d50ce311e32902fd001fb365e5e45e47a9a5cd59d734c89cb6", + ] +} + +provider "registry.terraform.io/hashicorp/cloudinit" { + version = "2.2.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:tQLNREqesrdCQ/bIJnl0+yUK+XfdWzAG0wo4lp10LvM=", + "zh:76825122171f9ea2287fd27e23e80a7eb482f6491a4f41a096d77b666896ee96", + "zh:795a36dee548e30ca9c9d474af9ad6d29290e0a9816154ad38d55381cd0ab12d", + "zh:9200f02cb917fb99e44b40a68936fd60d338e4d30a718b7e2e48024a795a61b9", + "zh:a33cf255dc670c20678063aa84218e2c1b7a67d557f480d8ec0f68bc428ed472", + "zh:ba3c1b2cd0879286c1f531862c027ec04783ece81de67c9a3b97076f1ce7f58f", + "zh:bd575456394428a1a02191d2e46af0c00e41fd4f28cfe117d57b6aeb5154a0fb", + "zh:c68dd1db83d8437c36c92dc3fc11d71ced9def3483dd28c45f8640cfcd59de9a", + "zh:cbfe34a90852ed03cc074601527bb580a648127255c08589bc3ef4bf4f2e7e0c", + "zh:d6ffd7398c6d1f359b96f5b757e77b99b339fbb91df1b96ac974fe71bc87695c", + "zh:d9c15285f847d7a52df59e044184fb3ba1b7679fd0386291ed183782683d9517", + 
"zh:f7dd02f6d36844da23c9a27bb084503812c29c1aec4aba97237fec16860fdc8c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.11.0" + constraints = ">= 2.10.0, ~> 2.10" + hashes = [ + "h1:pJiAJwZKUaoAJ4x+3ONJkwEVkjrwGROCGFgj7noPO58=", + "zh:143a19dd0ea3b07fc5e3d9231f3c2d01f92894385c98a67327de74c76c715843", + "zh:1fc757d209e09c3cf7848e4274daa32408c07743698fbed10ee52a4a479b62b6", + "zh:22dfebd0685749c51a8f765d51a1090a259778960ac1cd4f32021a325b2b9b72", + "zh:3039b3b76e870cd8fc404cf75a29c66b171c6ba9b6182e131b6ae2ca648ec7c0", + "zh:3af0a15562fcab4b5684b18802e0239371b2b8ff9197ed069ff4827f795a002b", + "zh:50aaf20336d1296a73315adb66f7687f75bd5c6b1f93a894b95c75cc142810ec", + "zh:682064fabff895ec351860b4fe0321290bbbb17c2a410b62c9bea0039400650e", + "zh:70ac914d5830b3371a2679d8f77cc20c419a6e12925145afae6c977c8eb90934", + "zh:710aa02cccf7b0f3fb50880d6d2a7a8b8c9435248666616844ba71f74648cddc", + "zh:88e418118cd5afbdec4984944c7ab36950bf48e8d3e09e090232e55eecfb470b", + "zh:9cef159377bf23fa331f8724fdc6ce27ad39a217a4bae6df3b1ca408fc643da6", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "3.4.0" + constraints = ">= 3.0.0" + hashes = [ + "h1:oyllIA9rNGCFtClSyBitUIzCXdnKtspVepdsvpLlfys=", + "zh:2442a0df0cfb550b8eba9b2af39ac06f54b62447eb369ecc6b1c29f739b33bbb", + "zh:3ebb82cacb677a099de55f844f0d02886bc804b1a2b94441bc40fabcb64d2a38", + "zh:436125c2a7e66bc62a4a7c68bdca694f071d7aa894e8637dc83f4a68fe322546", + "zh:5f03db9f1d77e8274ff4750ae32d5c16c42b862b06bcb0683e4d733c8db922e4", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8190142ae8a539ab34193b7e75da0fa04035d1dcd8af8be94df1eafeeffb44b6", + "zh:8cdc7cd9221e27c189e5beaf78462fce4c2edb081f415a1eafc6da2949de31e2", + "zh:a5de0f7f5d63c59ebf61d3c1d94040f410665ff0aa04f66674efe24b39a11f94", + "zh:a9fce48db3c140cc3e06f8a3c7ef4d36735e457e7660442d6d5dcd2b0781adc3", + 
"zh:beb92de584c790c7c7f047e45ccd22b6ee3263c7b5a91ae4d6882ae6e7700570", + "zh:f373f8cc52846fb513f44f468d885f722ca4dc22af9ff1942368cafd16b796b3", + "zh:f69627fd6e5a920b17ff423cdbad2715078ca6d13146dc67668795582ab43748", + ] +} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf new file mode 100644 index 000000000..9508d114e --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf @@ -0,0 +1,159 @@ +#================================# +# Common variables # +#================================# + +# +# config/backend.config +# +#================================# +# Terraform AWS Backend Settings # +#================================# +variable "region" { + type = string + description = "AWS Region" +} + +variable "profile" { + type = string + description = "AWS Profile (required by the backend but also used for other resources)" +} + +variable "bucket" { + type = string + description = "AWS S3 TF State Backend Bucket" +} + +variable "dynamodb_table" { + type = string + description = "AWS DynamoDB TF Lock state table name" +} + +variable "encrypt" { + type = bool + description = "Enable AWS DynamoDB with server side encryption" +} + +# +# config/base.config +# +#=============================# +# Project Variables # +#=============================# +variable "project" { + type = string + description = "Project Name" +} + +variable "project_long" { + type = string + description = "Project Long Name" +} + +variable "environment" { + type = string + description = "Environment Name" +} + +# +# config/extra.config +# +#=============================# +# Accounts & Extra Vars # +#=============================# +variable "region_primary" { + type = string + description = "AWS Primary Region for HA" +} + +variable "region_secondary" { + type = string + description = "AWS Scondary Region for HA" +} + +variable "accounts" { + type = map(any) + description = "Accounts descriptions" +} + 
+variable "root_account_id" { + type = string + description = "Account: Root" +} + +variable "security_account_id" { + type = string + description = "Account: Security & Users Management" +} + +variable "shared_account_id" { + type = string + description = "Account: Shared Resources" +} + +variable "network_account_id" { + type = string + description = "Account: Networking Resources" +} + +variable "appsdevstg_account_id" { + type = string + description = "Account: Dev Modules & Libs" +} + +variable "appsprd_account_id" { + type = string + description = "Account: Prod Modules & Libs" +} + +variable "vault_address" { + type = string + description = "Vault Address" +} + +variable "vault_token" { + type = string + description = "Vault Token" +} + +#=============================# +# AWS SSO Variables # +#=============================# +variable "sso_role" { + description = "SSO Role Name" +} + +variable "sso_enabled" { + type = string + description = "Enable SSO Service" +} + +variable "sso_region" { + type = string + description = "SSO Region" +} + +variable "sso_start_url" { + type = string + description = "SSO Start Url" +} + +#===========================================# +# Networking # +#===========================================# +variable "enable_tgw" { + description = "Enable Transit Gateway Support" + type = bool + default = false +} + +variable "enable_tgw_multi_region" { + description = "Enable Transit Gateway multi region support" + type = bool + default = false +} + +variable "tgw_cidrs" { + description = "CIDRs to be added as routes to public RT" + type = list(string) + default = [] +} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/config.tf b/apps-devstg/us-east-1/k8s-eks/cluster/config.tf new file mode 100644 index 000000000..43331131a --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/config.tf @@ -0,0 +1,70 @@ +# +# Providers +# +provider "aws" { + region = var.region + profile = var.profile +} + +provider "kubernetes" { + host = 
data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token +} + +# +# Backend Config (partial) +# +terraform { + required_version = "~> 1.1.3" + + required_providers { + aws = "~> 4.10" + kubernetes = "~> 2.10" + } + + backend "s3" { + key = "apps-devstg/k8s-eks/cluster/terraform.tfstate" + } +} + +# +# Data Sources +# +data "aws_eks_cluster" "cluster" { + name = module.cluster.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.cluster.cluster_id +} + +data "terraform_remote_state" "eks-vpc" { + backend = "s3" + config = { + region = var.region + profile = var.profile + bucket = var.bucket + key = "apps-devstg/k8s-eks/network/terraform.tfstate" + } +} + +data "terraform_remote_state" "keys" { + backend = "s3" + config = { + region = var.region + profile = var.profile + bucket = var.bucket + key = "apps-devstg/security-keys/terraform.tfstate" + } +} + +data "terraform_remote_state" "shared-vpc" { + backend = "s3" + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/network/terraform.tfstate" + } +} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf b/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf new file mode 100644 index 000000000..01562b63d --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf @@ -0,0 +1,174 @@ +module "cluster" { + source = "github.com/binbashar/terraform-aws-eks.git?ref=v18.24.1" + + create = true + cluster_name = data.terraform_remote_state.eks-vpc.outputs.cluster_name + cluster_version = var.cluster_version + enable_irsa = true + + # Configure networking + vpc_id = data.terraform_remote_state.eks-vpc.outputs.vpc_id + subnet_ids = data.terraform_remote_state.eks-vpc.outputs.private_subnets + + # Configure public/private cluster endpoints + 
cluster_endpoint_private_access = var.cluster_endpoint_private_access + cluster_endpoint_public_access = var.cluster_endpoint_public_access + + # Configure cluster inbound/outbound rules + create_cluster_security_group = var.create_cluster_security_group + cluster_security_group_additional_rules = { + ingress_shared_vpc_443 = { + description = "Shared VPC to Cluster API" + protocol = "tcp" + from_port = 443 + to_port = 443 + type = "ingress" + cidr_blocks = [ + data.terraform_remote_state.shared-vpc.outputs.vpc_cidr_block + ] + } + } + + # Configure node inbound/outbound rules + node_security_group_additional_rules = { + # + # NOTE: these 2 rules below allow all communication between nodes. + # A more secure approach would only allow specific ports & protocols to + # communicate between nodes. However, although said approach can be + # achieved, it requires a deeper understanding of the architecture of + # the components and workloads that you run in the cluster. + # + ingress_self_all = { + description = "Node to Node all ports & protocols" + protocol = -1 + from_port = 0 + to_port = 0 + type = "ingress" + self = true + }, + egress_self_all = { + description = "Node to Node all ports & protocols" + protocol = -1 + from_port = 0 + to_port = 0 + type = "egress" + self = true + }, + # + # Admission controller rules + # + ingress_nginx_ingress_admission_controller_webhook_tcp = { + description = "Cluster API to Nginx Ingress Admission Controller Webhook" + protocol = "tcp" + from_port = 8443 + to_port = 8443 + type = "ingress" + source_cluster_security_group = true + }, + ingress_alb_ingress_admission_controller_webhook_tcp = { + description = "Cluster API to ALB Ingress Admission Controller Webhook" + protocol = "tcp" + from_port = 9443 + to_port = 9443 + type = "ingress" + source_cluster_security_group = true + }, + # + # DNS communication with the Internet + # + egress_public_dns_tcp = { + description = "Node to public DNS servers" + protocol = "tcp" + from_port = 
53 + to_port = 53 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + }, + egress_public_dns_udp = { + description = "Node to public DNS servers" + protocol = "udp" + from_port = 53 + to_port = 53 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + }, + # + # Access to resources in EKS VPC + # + egress_eks_private_subnets_tcp = { + description = "Node to EKS Private Subnets" + protocol = "tcp" + from_port = 1024 + to_port = 65535 + type = "egress" + cidr_blocks = [data.terraform_remote_state.eks-vpc.outputs.private_subnets_cidr[0]] + }, + # + # Access to resources in Shared VPC + # + egress_shared_vpc_all = { + description = "Node to HTTPS endpoints on Shared VPC" + protocol = "tcp" + from_port = 443 + to_port = 443 + type = "egress" + cidr_blocks = [data.terraform_remote_state.shared-vpc.outputs.vpc_cidr_block] + }, + } + + # Specify the CIDR of k8s services -- Ref: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster#kubernetes_network_config + cluster_service_ipv4_cidr = "10.100.0.0/16" + + # Encrypt selected k8s resources with this account's KMS CMK + cluster_encryption_config = [{ + provider_key_arn = data.terraform_remote_state.keys.outputs.aws_kms_key_arn + resources = ["secrets"] + }] + + # Define Managed Nodes Groups (MNG's) default settings + eks_managed_node_group_defaults = { + # Managed Nodes cannot specify custom AMIs, only use the ones allowed by EKS + ami_type = "AL2_x86_64" + disk_size = 50 + instance_types = ["t2.medium"] + k8s_labels = local.tags + } + + # Define all Managed Node Groups (MNG's) + eks_managed_node_groups = { + on-demand = { + min_size = 1 + max_size = 3 + desired_size = 1 + capacity_type = "ON_DEMAND" + instance_types = ["t2.medium", "t3.medium"] + } + spot = { + desired_capacity = 1 + max_capacity = 3 + min_capacity = 1 + capacity_type = "SPOT" + instance_types = ["t2.medium", "t3.medium"] + } + } + + # Configure which roles, users and accounts can access the k8s api + manage_aws_auth_configmap = 
var.manage_aws_auth + aws_auth_roles = local.map_roles + aws_auth_users = local.map_users + aws_auth_accounts = local.map_accounts + + # Configure which log types should be enabled and how long they should be kept for + cluster_enabled_log_types = [ + "api", + "audit", + "authenticator", + ] + cloudwatch_log_group_retention_in_days = var.cluster_log_retention_in_days + + # Define tags (notice we are appending here tags required by the cluster autoscaler) + tags = merge(local.tags, + { "k8s.io/cluster-autoscaler/enabled" = "TRUE" }, + { "k8s.io/cluster-autoscaler/${data.terraform_remote_state.eks-vpc.outputs.cluster_name}" = "owned" } + ) +} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/locals.tf b/apps-devstg/us-east-1/k8s-eks/cluster/locals.tf new file mode 100644 index 000000000..7bb3ba00d --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/locals.tf @@ -0,0 +1,62 @@ +locals { + tags = { + Terraform = "true" + Environment = var.environment + Project = var.project + } + + # Additional AWS account numbers to add to the aws-auth configmap + # + map_accounts = [ + # var.security_account_id, # security + # var.shared_account_id, # shared + # var.appsdevstg_account_id, # apps-devstg + ] + + # Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format + # + map_users = [ + # { + # userarn = "arn:aws:iam:${var.security_account_id}:user/jane.doe" + # username = "jane.doe" + # groups = ["system:masters"] + # }, + # { + # userarn = "arn:aws:iam:${var.security_account_id}:user/john.doe" + # username = "john.doe" + # groups = ["system:masters"] + # }, + ] + + # Additional IAM roles to add to the aws-auth configmap. 
+ # + map_roles = [ + # + # Github Actions Workflow will assume this role in order to be able to destroy the cluster + # + { + rolearn = "arn:aws:iam::${var.appsdevstg_account_id}:role/DeployMaster" + username = "DeployMaster" + groups = [ + "system:masters"] + }, + # + # Allow DevOps role to become cluster admins + # + { + rolearn = "arn:aws:iam::${var.appsdevstg_account_id}:role/DevOps" + username = "DevOps" + groups = [ + "system:masters"] + }, + # + # Allow DevOps SSO role to become cluster admins + # + { + rolearn = "arn:aws:iam::${var.appsdevstg_account_id}:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DevOps_5e0501636a32f9c4" + username = "DevOps" + groups = [ + "system:masters"] + }, + ] +} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/outputs.tf b/apps-devstg/us-east-1/k8s-eks/cluster/outputs.tf new file mode 100644 index 000000000..84c426289 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/outputs.tf @@ -0,0 +1,42 @@ +# +# EKS Module +# +output "cluster_id" { + description = "EKS Cluster ID" + value = module.cluster.cluster_id +} + +output "cluster_name" { + description = "EKS Cluster Name" + value = data.terraform_remote_state.eks-vpc.outputs.cluster_name +} + +output "cluster_endpoint" { + description = "Endpoint for EKS control plane." + value = module.cluster.cluster_endpoint +} + +output "cluster_oidc_issuer_url" { + description = "EKS OpenID Connect Issuer URL." + value = module.cluster.cluster_oidc_issuer_url +} + +output "cluster_primary_security_group_id" { + description = "Security group ids attached to the cluster control plane." + value = module.cluster.cluster_primary_security_group_id +} + +output "cluster_kubeconfig_instructions" { + description = "Instructions to generate a kubeconfig file." 
+ value = < Date: Fri, 1 Jul 2022 12:28:58 -0300 Subject: [PATCH 3/6] Adding 1st apps-devstg/us-east-1/k8s-eks README.md version --- apps-devstg/us-east-1/k8s-eks/README.md | 195 ++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 apps-devstg/us-east-1/k8s-eks/README.md diff --git a/apps-devstg/us-east-1/k8s-eks/README.md b/apps-devstg/us-east-1/k8s-eks/README.md new file mode 100644 index 000000000..4ce6fbc03 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/README.md @@ -0,0 +1,195 @@ +# AWS EKS Reference Layer + +## Overview +This documentation should help you understand the different pieces that make up this +EKS cluster. With such understanding you should be able to create your copies of this +cluster that are modified to serve other goals, such as having a cluster per environment. + +Terraform code to orchestrate and deploy our EKS (cluster, network, k8s resources) reference +architecture. Consider that we already have an [AWS Landing Zone](https://github.com/binbashar/le-tf-infra-aws) +deployed as baseline which allow us to create, extend and enable new components on its grounds. + +## Code Organization +The EKS layer (`apps-devstg/us-east-1/k8s-eks`) is divided into sub-layers which +have clear, specific purposes. + +### The "network" layer +This is where we define the VPC resources for this cluster. + +### The "cluster" layer +This is used to define the cluster attributes such as node groups and kubernetes version. + +### The "identities" layer +This layer defines EKS IRSA roles that are later on assumed by roles running in the cluster. + +### The "k8s-components" layer +This here defines the base cluster components such as ingress controllers, +certificate managers, dns managers, ci/cd components, and more. + +### The "k8s-workloads" layer +This here defines the cluster workloads such as web-apps, apis, back-end microservices, etc. 
+ +## Important: read this if you are copying the EKS layer to stand up a new cluster +The typical use cases would be: +- You need to set up a new cluster in a new account +- Or you need to set up another cluster in an existing account which already has a cluster + +Below we'll cover the first case but we'll assume that we are creating the `prd` cluster from the code that +defines the `devstg` cluster: +1. First, you would copy-paste an existing EKS layer along with all its sub-layers: `cp -r apps-devstg/us-east-1/k8s-eks apps-prd/us-east-1/k8s-eks` +2. Then, you need to go through each layer, open up the `config.tf` file and replace any occurrences of `devstg` with `prd`. + 1. There should be a `config.tf` in each sublayer so please make sure you cover all of them. + +Now that you created the layers for the cluster you need to create a few other layers in the +new account that the cluster layers depend on, they are: +3. The `security-keys` layer + - This layer creates a KMS key that we use for encrypting EKS state. + - The procedure to create this layer is similar to the previous steps. + - You need to copy the layer from the `devstg` account and adjust its files to replace occurrences of `devstg` with `prd`. + - Finally you need to run the Terraform Workflow (init and apply). +4. The `security-certs` layer + - This layer creates the AWS Certificate Manager certificates that are used by the AWS ALBs that are created by the ALB Ingress Controller. + - A similar procedure to create this layer. Get this layer from `devstg`, replace references to `devstg` with `prd`, and then run init & apply. + +### Current EKS Cluster Creation Workflows + +Following the [leverage terraform workflow](https://leverage.binbash.com.ar/user-guide/ref-architecture-aws/workflow/) +The EKS layers need to be orchestrated in the following order: + +1. Network + 1. Open the `locals.tf` file and make sure the VPC CIDR and subnets are correct. + 1. 
Check the CIDR/subnets definition that were made for DevStg and Prd clusters and avoid segments overlapping. + 2. In the same `locals.tf` file, there is a "VPC Peerings" section. + 1. Make sure it contains the right entries to match the VPC peerings that you actually need to set up. + 3. In the `variables.tf` file you will find several variables you can use to configure multiple settings. + 1. For instance, if you anticipate this cluster is going to be permanent, you could set the `vpc_enable_nat_gateway` flag to `true`; + 2. or if you are standing up a production cluster, you may want to set `vpc_single_nat_gateway` to `false` in order to have a NAT Gateways per availability zone. +2. Cluster + 1. Since we’re deploying a private K8s cluster you’ll need to be **connected to the VPN** + 2. Check out the `variables.tf` file to configure the Kubernetes version or whether you want to create a cluster with a public endpoint (in most cases you don't but the possibility is there). + 3. Open up `locals.tf` and make sure the `map_accounts`, `map_users` and `map_roles` variables define the right accounts, users and roles that will be granted permissions on the cluster. + 4. Then open `eks-managed-nodes.tf` to set the node groups and their attributes according to your requirements. + 1. In this file you can also configure security group rules, both for granting access to the cluster API or to the nodes. + 5. Go to this layer and run `leverage tf apply` + 6. In the output you should see the credentials you need to talk to Kubernetes API via kubectl (or other clients). + +#### Setup auth and test cluster connectivity +1. Connecting to the K8s EKS cluster +2. Since we’re deploying a private K8s cluster you’ll need to be **connected to the VPN** +3. install `kubetcl` in your workstation + 1. https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-using-native-package-management + 2. 
https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos + 3. 📒 NOTE: consider using `kubectl` version 1.22-1.24 (depending on your cluster version) +4. If working with AWS SSO approach refresh your temporary credentials + 1. `leverage terraform init` +5. Export AWS credentials + 1. `export AWS_SHARED_CREDENTIALS_FILE="~/.aws/bb/credentials"` + 2. `export AWS_CONFIG_FILE="~/.aws/bb/config"` +6. To generate `k8s-eks/cluster` layer `kubeconfig` file + 1. `export KUBECONFIG=~/.kube/bb/config-bb-devstg-k8s-eks` + 2. `aws eks update-kubeconfig --region us-east-1 --name bb-apps-devstg-eks-1ry --profile bb-apps-devstg-devops` + 3. Edit `~/.kube/bb/apps-devstg/config-bb-devstg-k8s-eks` and add the proper env vars to let kubeconfig notice the AWS creds path + ``` + env: + - name: AWS_PROFILE + value: bb-apps-devstg-devops + - name: AWS_CONFIG_FILE + value: /Users/exequielbarrirero/.aws/bb/config + - name: AWS_SHARED_CREDENTIALS_FILE + value: /Users/exequielbarrirero/.aws/bb/credentials + ``` + 4. Place the kubeconfig in `~/.kube/bb/apps-devstg` and then use export `KUBECONFIG=~/.kube/bb/apps-devstg` to help tools like kubectl find a way to talk to the cluster (or `KUBECONFIG=~/.kube/bb/apps-devstg get pods --all-namespaces` ) + 5. 
You should be now able to run kubectl commands (https://kubernetes.io/docs/reference/kubectl/cheatsheet/) + +#### Example kubeconfig +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQXXXXXXXXXXXXXXXXXXXXXXXXXXXUFBNPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://16DXXXXXXXXXXXXXXXXXXXX1C33.gr7.us-east-1.eks.amazonaws.com + name: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry +contexts: +- context: + cluster: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry + user: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry + name: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry +current-context: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry +kind: Config +preferences: {} +users: +- name: arn:aws:eks:us-east-1:XXXXXXXXXXXX:cluster/bb-apps-devstg-eks-1ry + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - --region + - us-east-1 + - eks + - get-token + - --cluster-name + - bb-apps-devstg-eks-1ry + command: aws + env: + - name: AWS_PROFILE + value: bb-apps-devstg-devops + - name: AWS_CONFIG_FILE + value: /Users/exequielbarrirero/.aws/bb/config + - name: AWS_SHARED_CREDENTIALS_FILE + value: /Users/exequielbarrirero/.aws/bb/credentials +``` + +3. Identities layers + 1. The main files begin with the `ids_` prefix. + 1. They declare roles and their respective policies. + 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. + 2. Go to this layer and run `leverage tf apply` + +### K8s EKS Cluster Components and Workloads deployment + +1. Cluster Components (k8s-resources) + 1. Go to this layer and run `leverage tf apply` + 2. You can use the `apps.auto.tfvars` file to configure which components get installed + 3. Important: For private repo integrations after ArgoCD was successfully installed you will need to create this secret object in the cluster. 
Before creating the secret you need to update it to add the private SSH key that will grant ArgoCD permission to read the repository where the application definition files can be located. Note that this manual step is only a workaround that could be automated to simplify the orchestration. +2. Workloads (k8s-workloads) + 1. Go to this layer and run `leverage tf apply` + +## Accessing the EKS Kubernetes resources (connectivity) +To access the Kubernetes resources using `kubectl` take into account that you need **connect +to the VPN** since all our implementations are via private endpoints (private VPC subnets). + +### Connecting to ArgoCD + 1. Since we’re deploying a private K8s cluster you’ll need to be connected to the VPN + 2. From your web browser access to https://argocd.us-east-1.devstg.aws.binbash.com.ar/ + 3. Considering the current `4.5.7` version we are using the default password it's stored in a secret. + 1. To obtain it, use this command: `kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d` + 4. As Username, the default user is **admin**. + 5. You'll see the [EmojiVoto demo app](https://github.com/binbashar/le-demo-apps/tree/master/emojivoto/argocd) deployed and accessible at https://emojivoto.devstg.aws.binbash.com.ar/ + +**CONSIDERATION** +When running kubectl commands you could expect to get the following warning + +`/apps-devstg/us-east-1/k8s-eks/cluster$ kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2` + +``` +Cache file /home/user/.kube/cache/aws-iam-authenticator/credentials.yaml does not exist. +No cached credential available. Refreshing... 
+Unable to cache credential: ProviderNotExpirer: provider SharedConfigCredentials: /home/user/.aws/bb/credentials does not support ExpiresAt()
+```
+
+It is about aws-iam-authenticator `not finding an “expiresat” entry in this file /home/user/.aws/bb/credentials`
+
+**UPDATE on the kubectl/aws-iam-authenticator warning:**
+
+It seems to be related to https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/219
+Basically, kubectl delegates to aws-iam-authenticator to retrieve the token it needs to talk to the k8s API, but aws-iam-auth fails to provide that in the format that is expected by kubectl; given that it is using an SSO flow, it’s missing the ExpiresAt field.
+
+In other words, using the old AWS IAM flow, aws-iam-auth is able to comply because that flow does include an expiration value besides the temporary credentials; but the SSO flow doesn’t include the expiration value for the temporary credentials, as such expiration exists at the SSO token level, not at the temporary credentials level (which are obtained through said token).
+
+## Post-initial Orchestration
+After the initial orchestration, the typical flow could include multiple tasks. In other words, there won't be a single normal flow, but some of the operations you would need to perform are:
+- Update Kubernetes versions
+- Update cluster components versions
+- Add/remove/update cluster components settings
+- Update network settings (e.g.
toggle NAT Gateway, update Network ACLs, etc) +- Update IRSA roles/policies to grant/remove/fine-tune permissions From 2da67396243b119855c167ea0208d680d0a24fae Mon Sep 17 00:00:00 2001 From: exequielrafaela Date: Fri, 1 Jul 2022 12:50:26 -0300 Subject: [PATCH 4/6] make pre-commit applied to format code --- apps-devstg/us-east-1/k8s-eks-v1.17/README.md | 2 +- apps-devstg/us-east-1/k8s-eks/README.md | 36 +++++++++---------- .../k8s-eks/cluster/eks-managed-nodes.tf | 10 +++--- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/apps-devstg/us-east-1/k8s-eks-v1.17/README.md b/apps-devstg/us-east-1/k8s-eks-v1.17/README.md index 801ae8507..454ac81b1 100644 --- a/apps-devstg/us-east-1/k8s-eks-v1.17/README.md +++ b/apps-devstg/us-east-1/k8s-eks-v1.17/README.md @@ -136,7 +136,7 @@ users: 1. They declare roles and their respective policies. 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. 2. Go to this layer and run `leverage tf apply` - + ### K8s EKS Cluster Components and Workloads deployment 1. Cluster Components (k8s-resources) diff --git a/apps-devstg/us-east-1/k8s-eks/README.md b/apps-devstg/us-east-1/k8s-eks/README.md index 4ce6fbc03..ce5e7ad84 100644 --- a/apps-devstg/us-east-1/k8s-eks/README.md +++ b/apps-devstg/us-east-1/k8s-eks/README.md @@ -23,7 +23,7 @@ This is used to define the cluster attributes such as node groups and kubernetes This layer defines EKS IRSA roles that are later on assumed by roles running in the cluster. ### The "k8s-components" layer -This here defines the base cluster components such as ingress controllers, +This here defines the base cluster components such as ingress controllers, certificate managers, dns managers, ci/cd components, and more. 
### The "k8s-workloads" layer @@ -34,17 +34,17 @@ The typical use cases would be: - You need to set up a new cluster in a new account - Or you need to set up another cluster in an existing account which already has a cluster -Below we'll cover the first case but we'll assume that we are creating the `prd` cluster from the code that +Below we'll cover the first case but we'll assume that we are creating the `prd` cluster from the code that defines the `devstg` cluster: 1. First, you would copy-paste an existing EKS layer along with all its sub-layers: `cp -r apps-devstg/us-east-1/k8s-eks apps-prd/us-east-1/k8s-eks` -2. Then, you need to go through each layer, open up the `config.tf` file and replace any occurrences of `devstg` with `prd`. +2. Then, you need to go through each layer, open up the `config.tf` file and replace any occurrences of `devstg` with `prd`. 1. There should be a `config.tf` in each sublayer so please make sure you cover all of them. - -Now that you created the layers for the cluster you need to create a few other layers in the + +Now that you created the layers for the cluster you need to create a few other layers in the new account that the cluster layers depend on, they are: 3. The `security-keys` layer - This layer creates a KMS key that we use for encrypting EKS state. - - The procedure to create this layer is similar to the previous steps. + - The procedure to create this layer is similar to the previous steps. - You need to copy the layer from the `devstg` account and adjust its files to replace occurrences of `devstg` with `prd`. - Finally you need to run the Terraform Workflow (init and apply). 4. The `security-certs` layer @@ -57,18 +57,18 @@ Following the [leverage terraform workflow](https://leverage.binbash.com.ar/user The EKS layers need to be orchestrated in the following order: 1. Network - 1. Open the `locals.tf` file and make sure the VPC CIDR and subnets are correct. + 1. 
Open the `locals.tf` file and make sure the VPC CIDR and subnets are correct. 1. Check the CIDR/subnets definition that were made for DevStg and Prd clusters and avoid segments overlapping. - 2. In the same `locals.tf` file, there is a "VPC Peerings" section. + 2. In the same `locals.tf` file, there is a "VPC Peerings" section. 1. Make sure it contains the right entries to match the VPC peerings that you actually need to set up. - 3. In the `variables.tf` file you will find several variables you can use to configure multiple settings. - 1. For instance, if you anticipate this cluster is going to be permanent, you could set the `vpc_enable_nat_gateway` flag to `true`; + 3. In the `variables.tf` file you will find several variables you can use to configure multiple settings. + 1. For instance, if you anticipate this cluster is going to be permanent, you could set the `vpc_enable_nat_gateway` flag to `true`; 2. or if you are standing up a production cluster, you may want to set `vpc_single_nat_gateway` to `false` in order to have a NAT Gateways per availability zone. 2. Cluster 1. Since we’re deploying a private K8s cluster you’ll need to be **connected to the VPN** 2. Check out the `variables.tf` file to configure the Kubernetes version or whether you want to create a cluster with a public endpoint (in most cases you don't but the possibility is there). - 3. Open up `locals.tf` and make sure the `map_accounts`, `map_users` and `map_roles` variables define the right accounts, users and roles that will be granted permissions on the cluster. - 4. Then open `eks-managed-nodes.tf` to set the node groups and their attributes according to your requirements. + 3. Open up `locals.tf` and make sure the `map_accounts`, `map_users` and `map_roles` variables define the right accounts, users and roles that will be granted permissions on the cluster. + 4. Then open `eks-managed-nodes.tf` to set the node groups and their attributes according to your requirements. 1. 
In this file you can also configure security group rules, both for granting access to the cluster API or to the nodes. 5. Go to this layer and run `leverage tf apply` 6. In the output you should see the credentials you need to talk to Kubernetes API via kubectl (or other clients). @@ -85,7 +85,7 @@ The EKS layers need to be orchestrated in the following order: 5. Export AWS credentials 1. `export AWS_SHARED_CREDENTIALS_FILE="~/.aws/bb/credentials"` 2. `export AWS_CONFIG_FILE="~/.aws/bb/config"` -6. To generate `k8s-eks/cluster` layer `kubeconfig` file +6. To generate `k8s-eks/cluster` layer `kubeconfig` file 1. `export KUBECONFIG=~/.kube/bb/config-bb-devstg-k8s-eks` 2. `aws eks update-kubeconfig --region us-east-1 --name bb-apps-devstg-eks-1ry --profile bb-apps-devstg-devops` 3. Edit `~/.kube/bb/apps-devstg/config-bb-devstg-k8s-eks` and add the proper env vars to let kubeconfig notice the AWS creds path @@ -101,7 +101,7 @@ The EKS layers need to be orchestrated in the following order: 4. Place the kubeconfig in `~/.kube/bb/apps-devstg` and then use export `KUBECONFIG=~/.kube/bb/apps-devstg` to help tools like kubectl find a way to talk to the cluster (or `KUBECONFIG=~/.kube/bb/apps-devstg get pods --all-namespaces` ) 5. You should be now able to run kubectl commands (https://kubernetes.io/docs/reference/kubectl/cheatsheet/) -#### Example kubeconfig +#### Example kubeconfig ``` apiVersion: v1 clusters: @@ -139,10 +139,10 @@ users: value: /Users/exequielbarrirero/.aws/bb/credentials ``` -3. Identities layers - 1. The main files begin with the `ids_` prefix. - 1. They declare roles and their respective policies. - 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. +3. Identities layers + 1. The main files begin with the `ids_` prefix. + 1. They declare roles and their respective policies. + 2. The former are intended to be assumed by pods in your cluster through the EKS IRSA feature. 2. 
Go to this layer and run `leverage tf apply` ### K8s EKS Cluster Components and Workloads deployment diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf b/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf index 01562b63d..0221e315c 100644 --- a/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf +++ b/apps-devstg/us-east-1/k8s-eks/cluster/eks-managed-nodes.tf @@ -137,11 +137,11 @@ module "cluster" { # Define all Managed Node Groups (MNG's) eks_managed_node_groups = { on-demand = { - min_size = 1 - max_size = 3 - desired_size = 1 - capacity_type = "ON_DEMAND" - instance_types = ["t2.medium", "t3.medium"] + min_size = 1 + max_size = 3 + desired_size = 1 + capacity_type = "ON_DEMAND" + instance_types = ["t2.medium", "t3.medium"] } spot = { desired_capacity = 1 From 7cd5973421fc8adca1242d522757b6afe31ff362 Mon Sep 17 00:00:00 2001 From: exequielrafaela Date: Fri, 1 Jul 2022 13:28:52 -0300 Subject: [PATCH 5/6] apps-devstg/us-east-1/k8s-eks/identities layer created with all the necesary EKS IRSA roles --- .../k8s-eks/identities/.terraform.lock.hcl | 42 +++ .../k8s-eks/identities/common-variables.tf | 1 + .../us-east-1/k8s-eks/identities/config.tf | 54 ++++ .../k8s-eks/identities/identity_providers.tf | 14 + .../identities/ids_aws_lb_controller.tf | 246 ++++++++++++++++++ .../k8s-eks/identities/ids_certmanager.tf | 55 ++++ .../identities/ids_cluster_autoscaler.tf | 62 +++++ .../ids_externaldns_aws.binbash.com.ar.tf | 109 ++++++++ .../us-east-1/k8s-eks/identities/locals.tf | 15 ++ .../us-east-1/k8s-eks/identities/outputs.tf | 24 ++ .../us-east-1/k8s-eks/identities/variables.tf | 4 + 11 files changed, 626 insertions(+) create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/.terraform.lock.hcl create mode 120000 apps-devstg/us-east-1/k8s-eks/identities/common-variables.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/config.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/identity_providers.tf create mode 
100644 apps-devstg/us-east-1/k8s-eks/identities/ids_aws_lb_controller.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/ids_certmanager.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/ids_cluster_autoscaler.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/ids_externaldns_aws.binbash.com.ar.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/locals.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/outputs.tf create mode 100644 apps-devstg/us-east-1/k8s-eks/identities/variables.tf diff --git a/apps-devstg/us-east-1/k8s-eks/identities/.terraform.lock.hcl b/apps-devstg/us-east-1/k8s-eks/identities/.terraform.lock.hcl new file mode 100644 index 000000000..0d21b6345 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/identities/.terraform.lock.hcl @@ -0,0 +1,42 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.21.0" + constraints = ">= 2.23.0, ~> 4.10" + hashes = [ + "h1:eeuRCgJ2aEsVvCl0UOU99Rx58L2NMpM4Q5XdsfO4sr8=", + "zh:16529a8ac663845da9214a75f5a32a2d0daf393612e46259b6dff10f1b8b50ed", + "zh:1ae36386d4862a489a3981a482a537c16f8a1588a445b60f173d1f13fcc3552e", + "zh:5ab0f63784f7216528855272b341d3cbfbf378dc6ee23796debead505aff58a2", + "zh:5f28fec15d2e58623b0cdb610e36703b3035fb3a61289c6d8a4705fca5144cb8", + "zh:60b664b6d34b27609b3b4273dffa41ff2c6d15bb01e326bcd6a40944f9cc9839", + "zh:6a9010783b1c4574956e047d9981e96f8d4bbdd7057496ad35bb81acc0efa862", + "zh:8631ceb0187605305e2045f1f6aded046ba17e0cad64663011dd55c8a20330ec", + "zh:891ac1b0053c435b939462b1872ab383e72a8de05454164def2b96a362f7a729", + "zh:92bccfd7517abeda2ac6ddb78f1819742cafdba87af2074929d57cd7f2256c22", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:ad169953f8b9441624064815bd4b82b12ab20ba3e2f033ecf019d6a25ae42175", + "zh:b46eccb3bec96ace8863cd0302de475dd22e4bdd2176ddb82e76f998424e7ac3", + ] 
+} + +provider "registry.terraform.io/hashicorp/tls" { + version = "3.4.0" + constraints = "~> 3.4.0" + hashes = [ + "h1:oyllIA9rNGCFtClSyBitUIzCXdnKtspVepdsvpLlfys=", + "zh:2442a0df0cfb550b8eba9b2af39ac06f54b62447eb369ecc6b1c29f739b33bbb", + "zh:3ebb82cacb677a099de55f844f0d02886bc804b1a2b94441bc40fabcb64d2a38", + "zh:436125c2a7e66bc62a4a7c68bdca694f071d7aa894e8637dc83f4a68fe322546", + "zh:5f03db9f1d77e8274ff4750ae32d5c16c42b862b06bcb0683e4d733c8db922e4", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8190142ae8a539ab34193b7e75da0fa04035d1dcd8af8be94df1eafeeffb44b6", + "zh:8cdc7cd9221e27c189e5beaf78462fce4c2edb081f415a1eafc6da2949de31e2", + "zh:a5de0f7f5d63c59ebf61d3c1d94040f410665ff0aa04f66674efe24b39a11f94", + "zh:a9fce48db3c140cc3e06f8a3c7ef4d36735e457e7660442d6d5dcd2b0781adc3", + "zh:beb92de584c790c7c7f047e45ccd22b6ee3263c7b5a91ae4d6882ae6e7700570", + "zh:f373f8cc52846fb513f44f468d885f722ca4dc22af9ff1942368cafd16b796b3", + "zh:f69627fd6e5a920b17ff423cdbad2715078ca6d13146dc67668795582ab43748", + ] +} diff --git a/apps-devstg/us-east-1/k8s-eks/identities/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/identities/common-variables.tf new file mode 120000 index 000000000..2f807a597 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/identities/common-variables.tf @@ -0,0 +1 @@ +../../../../config/common-variables.tf \ No newline at end of file diff --git a/apps-devstg/us-east-1/k8s-eks/identities/config.tf b/apps-devstg/us-east-1/k8s-eks/identities/config.tf new file mode 100644 index 000000000..9e4705d89 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/identities/config.tf @@ -0,0 +1,54 @@ +#=============================# +# AWS Provider Settings # +#=============================# +provider "aws" { + region = var.region + profile = var.profile +} + +provider "aws" { + alias = "shared" + region = var.region + profile = "${var.project}-shared-devops" +} + +#=============================# +# Backend Config (partial) # 
+#=============================# +terraform { + required_version = ">= 1.1.3" + + required_providers { + aws = "~> 4.10" + tls = "~> 3.4.0" + } + + backend "s3" { + key = "apps-devstg/k8s-eks/identities/terraform.tfstate" + } +} + +#=============================# +# Data sources # +#=============================# +data "terraform_remote_state" "eks-cluster" { + backend = "s3" + + config = { + region = var.region + profile = var.profile + bucket = var.bucket + key = "${var.environment}/k8s-eks/cluster/terraform.tfstate" + } +} + +data "terraform_remote_state" "shared-dns" { + backend = "s3" + + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/dns/binbash.com.ar/terraform.tfstate" + } +} diff --git a/apps-devstg/us-east-1/k8s-eks/identities/identity_providers.tf b/apps-devstg/us-east-1/k8s-eks/identities/identity_providers.tf new file mode 100644 index 000000000..ec42256f8 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/identities/identity_providers.tf @@ -0,0 +1,14 @@ +# +# OIDC Provider needed for roles in the Shared account +# +resource "aws_iam_openid_connect_provider" "shared" { + provider = aws.shared + client_id_list = ["sts.amazonaws.com"] + thumbprint_list = [data.tls_certificate.shared.certificates[0].sha1_fingerprint] + url = data.terraform_remote_state.eks-cluster.outputs.cluster_oidc_issuer_url + tags = local.tags +} + +data "tls_certificate" "shared" { + url = data.terraform_remote_state.eks-cluster.outputs.cluster_oidc_issuer_url +} diff --git a/apps-devstg/us-east-1/k8s-eks/identities/ids_aws_lb_controller.tf b/apps-devstg/us-east-1/k8s-eks/identities/ids_aws_lb_controller.tf new file mode 100644 index 000000000..9f8e17414 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/identities/ids_aws_lb_controller.tf @@ -0,0 +1,246 @@ +# +# AWS Load Balancer Controller (ALB Ingress) Roles & Policies +# +module "role_aws_lb_controller" { + source = 
"github.com/binbashar/terraform-aws-iam.git//modules/iam-assumable-role-with-oidc?ref=v4.24.1" + + create_role = true + role_name = "${local.environment}-aws-lb-controller" + provider_url = replace(data.terraform_remote_state.eks-cluster.outputs.cluster_oidc_issuer_url, "https://", "") + + role_policy_arns = [ + aws_iam_policy.aws_lb_controller.arn + ] + oidc_fully_qualified_subjects = [ + "system:serviceaccount:alb-ingress:alb-ingress" + ] + + tags = local.tags_aws_lb_controller +} + +resource "aws_iam_policy" "aws_lb_controller" { + name = "${local.environment}-aws-lb-controller" + description = "AWS Load Balancer Controller" + tags = local.tags_aws_lb_controller + policy = < Date: Fri, 1 Jul 2022 13:30:11 -0300 Subject: [PATCH 6/6] apps-devstg/us-east-1/k8s-eks replacing common-variables.tf by the proper symlinked global var file --- .../k8s-eks/cluster/common-variables.tf | 160 +----------------- .../k8s-eks/network/common-variables.tf | 160 +----------------- 2 files changed, 2 insertions(+), 318 deletions(-) mode change 100644 => 120000 apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf mode change 100644 => 120000 apps-devstg/us-east-1/k8s-eks/network/common-variables.tf diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf deleted file mode 100644 index 9508d114e..000000000 --- a/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf +++ /dev/null @@ -1,159 +0,0 @@ -#================================# -# Common variables # -#================================# - -# -# config/backend.config -# -#================================# -# Terraform AWS Backend Settings # -#================================# -variable "region" { - type = string - description = "AWS Region" -} - -variable "profile" { - type = string - description = "AWS Profile (required by the backend but also used for other resources)" -} - -variable "bucket" { - type = string - description = "AWS S3 TF State Backend 
Bucket" -} - -variable "dynamodb_table" { - type = string - description = "AWS DynamoDB TF Lock state table name" -} - -variable "encrypt" { - type = bool - description = "Enable AWS DynamoDB with server side encryption" -} - -# -# config/base.config -# -#=============================# -# Project Variables # -#=============================# -variable "project" { - type = string - description = "Project Name" -} - -variable "project_long" { - type = string - description = "Project Long Name" -} - -variable "environment" { - type = string - description = "Environment Name" -} - -# -# config/extra.config -# -#=============================# -# Accounts & Extra Vars # -#=============================# -variable "region_primary" { - type = string - description = "AWS Primary Region for HA" -} - -variable "region_secondary" { - type = string - description = "AWS Scondary Region for HA" -} - -variable "accounts" { - type = map(any) - description = "Accounts descriptions" -} - -variable "root_account_id" { - type = string - description = "Account: Root" -} - -variable "security_account_id" { - type = string - description = "Account: Security & Users Management" -} - -variable "shared_account_id" { - type = string - description = "Account: Shared Resources" -} - -variable "network_account_id" { - type = string - description = "Account: Networking Resources" -} - -variable "appsdevstg_account_id" { - type = string - description = "Account: Dev Modules & Libs" -} - -variable "appsprd_account_id" { - type = string - description = "Account: Prod Modules & Libs" -} - -variable "vault_address" { - type = string - description = "Vault Address" -} - -variable "vault_token" { - type = string - description = "Vault Token" -} - -#=============================# -# AWS SSO Variables # -#=============================# -variable "sso_role" { - description = "SSO Role Name" -} - -variable "sso_enabled" { - type = string - description = "Enable SSO Service" -} - -variable "sso_region" { - 
type = string - description = "SSO Region" -} - -variable "sso_start_url" { - type = string - description = "SSO Start Url" -} - -#===========================================# -# Networking # -#===========================================# -variable "enable_tgw" { - description = "Enable Transit Gateway Support" - type = bool - default = false -} - -variable "enable_tgw_multi_region" { - description = "Enable Transit Gateway multi region support" - type = bool - default = false -} - -variable "tgw_cidrs" { - description = "CIDRs to be added as routes to public RT" - type = list(string) - default = [] -} diff --git a/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf new file mode 120000 index 000000000..2f807a597 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/cluster/common-variables.tf @@ -0,0 +1 @@ +../../../../config/common-variables.tf \ No newline at end of file diff --git a/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf deleted file mode 100644 index 9508d114e..000000000 --- a/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf +++ /dev/null @@ -1,159 +0,0 @@ -#================================# -# Common variables # -#================================# - -# -# config/backend.config -# -#================================# -# Terraform AWS Backend Settings # -#================================# -variable "region" { - type = string - description = "AWS Region" -} - -variable "profile" { - type = string - description = "AWS Profile (required by the backend but also used for other resources)" -} - -variable "bucket" { - type = string - description = "AWS S3 TF State Backend Bucket" -} - -variable "dynamodb_table" { - type = string - description = "AWS DynamoDB TF Lock state table name" -} - -variable "encrypt" { - type = bool - description = "Enable AWS DynamoDB with server side encryption" -} - -# -# config/base.config -# 
-#=============================# -# Project Variables # -#=============================# -variable "project" { - type = string - description = "Project Name" -} - -variable "project_long" { - type = string - description = "Project Long Name" -} - -variable "environment" { - type = string - description = "Environment Name" -} - -# -# config/extra.config -# -#=============================# -# Accounts & Extra Vars # -#=============================# -variable "region_primary" { - type = string - description = "AWS Primary Region for HA" -} - -variable "region_secondary" { - type = string - description = "AWS Scondary Region for HA" -} - -variable "accounts" { - type = map(any) - description = "Accounts descriptions" -} - -variable "root_account_id" { - type = string - description = "Account: Root" -} - -variable "security_account_id" { - type = string - description = "Account: Security & Users Management" -} - -variable "shared_account_id" { - type = string - description = "Account: Shared Resources" -} - -variable "network_account_id" { - type = string - description = "Account: Networking Resources" -} - -variable "appsdevstg_account_id" { - type = string - description = "Account: Dev Modules & Libs" -} - -variable "appsprd_account_id" { - type = string - description = "Account: Prod Modules & Libs" -} - -variable "vault_address" { - type = string - description = "Vault Address" -} - -variable "vault_token" { - type = string - description = "Vault Token" -} - -#=============================# -# AWS SSO Variables # -#=============================# -variable "sso_role" { - description = "SSO Role Name" -} - -variable "sso_enabled" { - type = string - description = "Enable SSO Service" -} - -variable "sso_region" { - type = string - description = "SSO Region" -} - -variable "sso_start_url" { - type = string - description = "SSO Start Url" -} - -#===========================================# -# Networking # -#===========================================# -variable 
"enable_tgw" { - description = "Enable Transit Gateway Support" - type = bool - default = false -} - -variable "enable_tgw_multi_region" { - description = "Enable Transit Gateway multi region support" - type = bool - default = false -} - -variable "tgw_cidrs" { - description = "CIDRs to be added as routes to public RT" - type = list(string) - default = [] -} diff --git a/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf b/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf new file mode 120000 index 000000000..2f807a597 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-eks/network/common-variables.tf @@ -0,0 +1 @@ +../../../../config/common-variables.tf \ No newline at end of file