From 8fee4dd93d38be42a98c4226b1c37d1e3e3e656c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Mat=C3=ADas=20Kungfoo=20de=20la=20C=C3=A1mara=20Beo?= =?UTF-8?q?vide?= Date: Wed, 3 Jul 2024 17:06:32 -0300 Subject: [PATCH] FEATURE/ added new KOPS K8s layer (#600) * FEATURE/ added new KOPS K8s layer * FEATURE/ improved docs * FEATURE/ fixed var name and value --- apps-devstg/us-east-1/k8s-kops/.gitignore | 1 + .../k8s-kops/1-prerequisites/config.tf | 53 +++++ .../k8s-kops/1-prerequisites/data.tf | 3 + .../us-east-1/k8s-kops/1-prerequisites/dns.tf | 57 +++++ .../k8s-kops/1-prerequisites/locals.tf | 59 +++++ .../k8s-kops/1-prerequisites/outputs.tf | 160 +++++++++++++ .../us-east-1/k8s-kops/1-prerequisites/s3.tf | 188 ++++++++++++++++ .../1-prerequisites/terraform.auto.tfvars | 3 + .../k8s-kops/1-prerequisites/variables.tf | 98 ++++++++ .../us-east-1/k8s-kops/2-kops/.gitignore | 10 + .../us-east-1/k8s-kops/2-kops/Makefile | 73 ++++++ .../k8s-kops/2-kops/cluster-config.sh | 50 ++++ .../k8s-kops/2-kops/cluster-template.sh | 18 ++ .../k8s-kops/2-kops/cluster-template.yml | 207 +++++++++++++++++ .../k8s-kops/2-kops/cluster-update.sh | 74 ++++++ .../us-east-1/k8s-kops/2-kops/config.tf | 33 +++ .../us-east-1/k8s-kops/2-kops/values.yaml | 1 + .../us-east-1/k8s-kops/3-extras/README.md | 3 + .../3-extras/chart-values/externaldns.yaml | 36 +++ .../3-extras/chart-values/traefik.yaml | 54 +++++ .../k8s-kops/3-extras/common-variables.tf | 1 + .../us-east-1/k8s-kops/3-extras/config.tf | 53 +++++ .../k8s-kops/3-extras/external-dns.tf | 31 +++ .../us-east-1/k8s-kops/3-extras/locals.tf | 7 + .../k8s-kops/3-extras/route53record.tf | 39 ++++ .../k8s-kops/3-extras/terraform.auto.tfvars | 5 + .../us-east-1/k8s-kops/3-extras/traefik.tf | 19 ++ .../us-east-1/k8s-kops/3-extras/variables.tf | 15 ++ apps-devstg/us-east-1/k8s-kops/README.md | 213 ++++++++++++++++++ apps-devstg/us-east-1/k8s-kops/TODO.md | 4 + 30 files changed, 1568 insertions(+) create mode 100644 
apps-devstg/us-east-1/k8s-kops/.gitignore create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/config.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/data.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/dns.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/locals.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/outputs.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/s3.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/terraform.auto.tfvars create mode 100644 apps-devstg/us-east-1/k8s-kops/1-prerequisites/variables.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/2-kops/.gitignore create mode 100644 apps-devstg/us-east-1/k8s-kops/2-kops/Makefile create mode 100755 apps-devstg/us-east-1/k8s-kops/2-kops/cluster-config.sh create mode 100755 apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.sh create mode 100644 apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.yml create mode 100755 apps-devstg/us-east-1/k8s-kops/2-kops/cluster-update.sh create mode 100644 apps-devstg/us-east-1/k8s-kops/2-kops/config.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/2-kops/values.yaml create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/README.md create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/externaldns.yaml create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/traefik.yaml create mode 120000 apps-devstg/us-east-1/k8s-kops/3-extras/common-variables.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/config.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/external-dns.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/locals.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/route53record.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/terraform.auto.tfvars create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/traefik.tf 
create mode 100644 apps-devstg/us-east-1/k8s-kops/3-extras/variables.tf create mode 100644 apps-devstg/us-east-1/k8s-kops/README.md create mode 100644 apps-devstg/us-east-1/k8s-kops/TODO.md diff --git a/apps-devstg/us-east-1/k8s-kops/.gitignore b/apps-devstg/us-east-1/k8s-kops/.gitignore new file mode 100644 index 000000000..c970b586c --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/.gitignore @@ -0,0 +1 @@ +*k8s.local diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/config.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/config.tf new file mode 100644 index 000000000..3ee0c66ab --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/config.tf @@ -0,0 +1,53 @@ +# Providers +provider "aws" { + region = var.region_primary + profile = var.profile +} + +#replica provider +provider "aws" { + alias = "region_secondary" + region = var.region_secondary + profile = var.profile +} + +provider "aws" { + alias = "shared" + region = var.region + profile = "${var.project}-shared-devops" +} + +# Backend Config (partial) +terraform { + required_version = "~> 1.3" + + required_providers { + aws = "~> 4.10" + } + + backend "s3" { + key = "apps-devstg/ca-central-1/k8s-kops/1-prerequisites/terraform.tfstate" + } +} + +data "terraform_remote_state" "vpc" { + backend = "s3" + + config = { + region = var.region + profile = var.profile + bucket = var.bucket + key = "${var.environment}/ca-central-1/kops-network/terraform.tfstate" + } +} + +data "terraform_remote_state" "vpc-shared" { + backend = "s3" + + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/us-east-1/network/terraform.tfstate" + } +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/data.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/data.tf new file mode 100644 index 000000000..5bff4c67d --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/data.tf @@ -0,0 +1,3 @@ +data 
"aws_iam_roles" "devopsrole" { + name_regex = ".*AWSReservedSSO_DevOps.*" +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/dns.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/dns.tf new file mode 100644 index 000000000..63ec9565e --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/dns.tf @@ -0,0 +1,57 @@ +# +# Private Hosted Zone for this k8s cluster +# +resource "aws_route53_zone" "cluster_domain" { + count = local.gossip_cluster ? 0 : 1 + + name = local.k8s_cluster_name + + vpc { + vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id + vpc_region = var.region + } + + # + # This Remote Account VPCs are added as a post step after the local-exec assoc occurs. + # If you won't like to add them please consider the below workaround + # Had to add this ignore override because of the cross-vpc resolution + # between shared and vpc-dev + # between shared and vpc-dev-eks + # + #lifecycle { + # ignore_changes = [ + # vpc, + # ] + # } + + ## IMPORTANT!!! ## + # Needs to be uncommented after the -> resource "null_resource" "create_remote_zone_auth" + # is established + vpc { + vpc_id = data.terraform_remote_state.vpc-shared.outputs.vpc_id + vpc_region = var.region + } +} + +# +# DNS/VPC association between Shared VPC and cluster-kops-1.k8s.devstg.binbash.aws +# + +# Authorize association from the owner account of the Private Zone +resource "aws_route53_vpc_association_authorization" "with_shared_vpc" { + count = local.gossip_cluster ? 0 : 1 + + vpc_id = data.terraform_remote_state.vpc-shared.outputs.vpc_id + zone_id = aws_route53_zone.cluster_domain[0].zone_id +} + + +# Complete the association from the owner account of the VPC +resource "aws_route53_zone_association" "with_shared_vpc" { + count = local.gossip_cluster ? 
0 : 1 + + provider = aws.shared + + vpc_id = aws_route53_vpc_association_authorization.with_shared_vpc[0].vpc_id + zone_id = aws_route53_vpc_association_authorization.with_shared_vpc[0].zone_id +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/locals.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/locals.tf new file mode 100644 index 000000000..b23f88c97 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/locals.tf @@ -0,0 +1,59 @@ +locals { + tags = { + Terraform = "true" + Environment = var.environment + } + + # We'll use a shorter environment name in order to keep things simple + short_environment = replace(var.environment, "apps-", "") + + # The name of the cluster + # if gossip_cluster then base_domain_name must be k8s.local + gossip_cluster = true + base_domain_name = "k8s.local" + k8s_cluster_name = "canada01-kops.${local.short_environment}.${local.base_domain_name}" + + # The kubernetes version + k8s_cluster_version = "1.28.9" + + # The etcd version + # Ref1: https://github.com/kubernetes/kops/blob/master/docs/cluster_spec.md#etcdclusters-v3--tls + # Ref2: https://github.com/etcd-io/etcd/releases + etcd_clusters_version = "3.5.9" + + # The Calico Network CNI version + # Ref1: https://github.com/kubernetes/kops/blob/master/docs/calico-v3.md + # Ref2: https://itnext.io/benchmark-results-of-kubernetes-network-plugins-cni-over-10gbit-s-network-36475925a560 + networking_calico_major_version = "v3" + + # Kops AMI Identifier + # check image in https://cloud-images.ubuntu.com/locator/ec2/ , look for your zone. 
+ kops_ami_id = "ami-04fea581fe25e2675" + + # Tags that will be applied to all K8s Kops cluster instances + cluster_tags = { + "kubernetes.io/cluster/${local.k8s_cluster_name}" = "owned" + } + + # Tags that will be applied to all K8s Kops Worker nodes + node_cloud_labels = {} + + # K8s Kops Master Nodes Machine (EC2) type and size + ASG Min-Max per AZ + # then min/max = 1 will create 1 Master Node x AZ => 3 x Masters + kops_master_machine_type = "t3.medium" + kops_master_machine_max_size = 1 + kops_master_machine_min_size = 1 + + # K8s Kops Worker Nodes Machine (EC2) type and size + ASG Min-Max + kops_worker_machine_type = "t3.medium" + kops_worker_machine_max_size = 5 + kops_worker_machine_min_size = 1 + # If you use Karpenter set the list of types here + kops_worker_machine_types_karpenter = ["t2.medium", "t2.large", "t3.medium", "t3.large", "t3a.medium", "t3a.large", "m4.large"] + + # master nodes AZs + # number_of_cluster_master_azs is 1, 2 or 3 + # master nodes will be deployed in these AZs + number_of_cluster_master_azs = 1 + cluster_master_azs = slice(data.terraform_remote_state.vpc.outputs.availability_zones, 0, local.number_of_cluster_master_azs ) +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/outputs.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/outputs.tf new file mode 100644 index 000000000..06b32cc1e --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/outputs.tf @@ -0,0 +1,160 @@ +# +# Cluster Settings +# +output "project_short" { + description = "Project Short Name" + value = var.project +} +output "profile" { + description = "AWS Profile" + value = var.profile +} +output "region" { + description = "AWS Region" + value = var.region +} +output "environment" { + description = "Environment Name" + value = var.environment +} +output "cluster_name" { + description = "The name of this cluster" + value = local.k8s_cluster_name +} +output "cluster_version" { + description = "Kubernetes version" + value = 
local.k8s_cluster_version +} +output "etcd_clusters_version" { + description = "etcd version" + value = local.etcd_clusters_version +} +output "networking_calico_major_version" { + description = "Calico network CNI major version" + value = local.networking_calico_major_version +} + +# +# Cluster Master Instance Group (IG) +# +output "cluster_master_azs" { + description = "Availability Zones where masters will be deployed" + value = local.cluster_master_azs +} +output "cluster_api_elb_extra_security_group" { + value = "" +} +output "kops_master_machine_type" { + description = "K8s Kops Master Nodes Machine (EC2) type and size" + value = local.kops_master_machine_type +} +output "kops_master_machine_max_size" { + description = "K8s Kops Master Nodes ASG max size" + value = local.kops_master_machine_max_size +} +output "kops_master_machine_min_size" { + description = "K8s Kops Master Nodes ASG min size" + value = local.kops_master_machine_min_size +} + +# +# Cluster Worker Nodes Instance Group (IG) +# +output "node_cloud_labels" { + description = "Cloud labels will become tags on the nodes" + value = local.node_cloud_labels +} +output "kops_worker_machine_type" { + description = "K8s Kops Worker Nodes Machine (EC2) type and size" + value = local.kops_worker_machine_type +} +output "kops_worker_machine_max_size" { + description = "K8s Kops Worker Nodes ASG max size" + value = local.kops_worker_machine_max_size +} +output "kops_worker_machine_min_size" { + description = "K8s Kops Worker Nodes ASG min size" + value = local.kops_worker_machine_min_size +} + +# +# Kops Resources +# +output "kops_s3_bucket" { + description = "Kops State S3 Bucket" + value = aws_s3_bucket.kops_state.bucket +} +output "kops_ami_id" { + description = "Kops AMI ID" + value = local.kops_ami_id +} + +# +# Network Resources +# +output "gossip_cluster" { + description = "Whether this is a gossip cluster" + value = local.gossip_cluster +} +output "hosted_zone_id" { + description = "Hosted Zone ID (Kops 
requires a domain for the cluster)" + value = local.gossip_cluster ? null : aws_route53_zone.cluster_domain[0].zone_id +} +output "vpc_id" { + value = data.terraform_remote_state.vpc.outputs.vpc_id +} +output "vpc_cidr_block" { + value = data.terraform_remote_state.vpc.outputs.vpc_cidr_block +} +output "availability_zones" { + value = data.terraform_remote_state.vpc.outputs.availability_zones +} +output "public_subnet_ids" { + value = zipmap( + data.terraform_remote_state.vpc.outputs.availability_zones, + data.terraform_remote_state.vpc.outputs.public_subnets + ) +} +output "private_subnet_ids" { + value = zipmap( + data.terraform_remote_state.vpc.outputs.availability_zones, + data.terraform_remote_state.vpc.outputs.private_subnets + ) +} +output "nat_gateway_ids" { + value = zipmap( + data.terraform_remote_state.vpc.outputs.availability_zones, [for az in data.terraform_remote_state.vpc.outputs.availability_zones: data.terraform_remote_state.vpc.outputs.nat_gateway_ids[0]] + ) +} +output "shared_vpc_cidr_block" { + value = data.terraform_remote_state.vpc-shared.outputs.vpc_cidr_block +} +output "ssh_pub_key_path" { + value = var.ssh_pub_key_path +} + +output "devops_role" { + value = tolist(data.aws_iam_roles.devopsrole.arns)[0] +} + +# +# IRSA +# +output "irsa_enabled" { + value = var.enable_irsa +} +output "irsa_bucket_name" { + value = var.enable_irsa ? 
aws_s3_bucket.kops_irsa[0].id : "" +} + +# +# KARPENTER +# +output "karpenter_enabled" { + value = var.enable_irsa && var.enable_karpenter +} + +output "kops_worker_machine_types_karpenter" { + description = "List of K8s Kops Worker Nodes Machine (EC2) types and size for Karpenter" + value = local.kops_worker_machine_types_karpenter +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/s3.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/s3.tf new file mode 100644 index 000000000..c34fb2dc6 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/s3.tf @@ -0,0 +1,188 @@ +data "aws_caller_identity" "current" {} + +# ############################################################################## +# +# STATE +# +# Bucket used to store Kops state +# +resource "aws_s3_bucket" "kops_state" { + bucket = "${var.project}-state-${replace(local.k8s_cluster_name, ".", "-")}" + + #acl = "private" + + lifecycle { + prevent_destroy = false + } + + tags = local.tags +} + +resource "aws_s3_bucket_policy" "kops_state" { + bucket = aws_s3_bucket.kops_state.id + policy = data.aws_iam_policy_document.kops_bucket_policy.json +} + +resource "aws_s3_bucket_versioning" "kops_state" { + bucket = aws_s3_bucket.kops_state.id + versioning_configuration { + status = "Enabled" + } +} + +#resource "aws_s3_bucket_acl" "kops_state" { +# bucket = aws_s3_bucket.kops_state.id +# acl = "private" +#} + +resource "aws_s3_bucket_server_side_encryption_configuration" "kops_state" { + bucket = aws_s3_bucket.kops_state.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +# +# S3 State Bucket Policy +# +data "aws_iam_policy_document" "kops_bucket_policy" { + statement { + sid = "EnforceSSlRequestsOnly" + + effect = "Deny" + + principals { + type = "AWS" + identifiers = ["*"] + } + + actions = [ + "s3:*", + ] + + resources = [ + "${aws_s3_bucket.kops_state.arn}/*", + ] + + # + # Check for a condition that always requires ssl communications + 
condition { + test = "Bool" + variable = "aws:SecureTransport" + values = ["false"] + } + } + statement { + sid = "AllowPrivate" + + effect = "Allow" + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + + actions = [ + "s3:*", + ] + + resources = [ + "${aws_s3_bucket.kops_state.arn}", + "${aws_s3_bucket.kops_state.arn}/*" + ] + } +} +# ############################################################################## +# IRSA +# To use the IRSA bucket you should access policy access to bucket in security-base layer +# +# Bucket used to store Kops irsa +# +resource "aws_s3_bucket" "kops_irsa" { + count = var.enable_irsa ? 1 : 0 + + bucket = "${var.project}-irsa-${replace(local.k8s_cluster_name, ".", "-")}" + + lifecycle { + prevent_destroy = false + } + + tags = local.tags +} + +resource "aws_s3_bucket_policy" "kops_irsa" { + count = var.enable_irsa ? 1 : 0 + + bucket = aws_s3_bucket.kops_irsa[0].id + policy = data.aws_iam_policy_document.kops_irsa_bucket_policy[0].json +} + +resource "aws_s3_bucket_versioning" "kops_irsa" { + count = var.enable_irsa ? 1 : 0 + + bucket = aws_s3_bucket.kops_irsa[0].id + versioning_configuration { + status = "Disabled" + } +} + +resource "aws_s3_bucket_public_access_block" "kops_irsa" { + count = var.enable_irsa ? 1 : 0 + + bucket = aws_s3_bucket.kops_irsa[0].id + + block_public_acls = true + block_public_policy = false + ignore_public_acls = true + restrict_public_buckets = false +} + +data "aws_iam_policy_document" "kops_irsa_bucket_policy" { + count = var.enable_irsa ? 
1 : 0 + + statement { + sid = "EnforceSSlRequestsOnly" + + effect = "Deny" + + principals { + type = "AWS" + identifiers = ["*"] + } + + actions = [ + "s3:*", + ] + + resources = [ + "${aws_s3_bucket.kops_irsa[0].arn}/*", + ] + + # + # Check for a condition that always requires ssl communications + condition { + test = "Bool" + variable = "aws:SecureTransport" + values = ["false"] + } + } + statement { + sid = "PublicReadOnly" + + principals { + type = "AWS" + identifiers = ["*"] + } + + actions = [ + "s3:GetObject" + ] + + resources = [ + "${aws_s3_bucket.kops_irsa[0].arn}/*", + ] + } +} diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/terraform.auto.tfvars b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/terraform.auto.tfvars new file mode 100644 index 000000000..eea9f61f9 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/terraform.auto.tfvars @@ -0,0 +1,3 @@ +ssh_pub_key_path="~/.ssh/bb/id_rsa" +enable_irsa=true +enable_karpenter=true diff --git a/apps-devstg/us-east-1/k8s-kops/1-prerequisites/variables.tf b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/variables.tf new file mode 100644 index 000000000..fa334247c --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/1-prerequisites/variables.tf @@ -0,0 +1,98 @@ +# +# config/backend.config +# +#================================# +# Terraform AWS Backend Settings # +#================================# +variable "project" { + type = string + description = "Project Short Name" +} + +variable "region" { + type = string + description = "AWS Region" +} + +variable "canada_region_primary" { + type = string + description = "AWS Region for Canada" +} + +variable "profile" { + type = string + description = "AWS Profile (required by the backend but also used for other resources)" +} + +variable "bucket" { + type = string + description = "AWS S3 TF State Backend Bucket" +} + +variable "dynamodb_table" { + type = string + description = "AWS DynamoDB TF Lock state table name" +} + +variable "encrypt" { 
+ type = bool + description = "Enable AWS DynamoDB with server side encryption" +} + +variable "ssh_pub_key_path" { + type = string + description = "The path to the public SSH key you want to use for the KOPS cluster" +} +# +# config/base.config +# +#=============================# +# Project Variables # +#=============================# +variable "project_long" { + type = string + description = "Project Long Name" +} + +variable "environment" { + type = string + description = "Environment Name" +} + +# +# config/extra.config +# +#=============================# +# Accounts & Extra Vars # +#=============================# +variable "region_secondary" { + type = string + description = "AWS Secondary Region for HA" +} + +#===========================================# +# DNS # +#===========================================# +variable "vpc_shared_dns_assoc" { + description = "true if Shared account VPC exists and needs DNS association" + type = bool + default = true +} + +#===========================================# +# IRSA # +#===========================================# +variable "enable_irsa" { + description = "Whether to activate IRSA in KOPS cluster (To use the IRSA bucket you should set 'block_public_policy = false' in security-base layer)" + type = bool + default = false +} + +#===========================================# +# KARPENTER # +#===========================================# +variable "enable_karpenter" { + description = "Whether to activate Karpenter in KOPS cluster (IRSA has to be enabled)" + type = bool + default = false +} diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/.gitignore b/apps-devstg/us-east-1/k8s-kops/2-kops/.gitignore new file mode 100644 index 000000000..048bbb578 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/.gitignore @@ -0,0 +1,10 @@ +# +# Consider updating (commenting) these .gitignore lines in order to get the +# below files versioned in your git repo. 
+# +!.gitignore +data/ +!data/.gitkeep +cluster.yml +kubernetes.tf.example + diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/Makefile b/apps-devstg/us-east-1/k8s-kops/2-kops/Makefile new file mode 100644 index 000000000..80e5101ca --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/Makefile @@ -0,0 +1,73 @@ +.PHONY: help +SHELL := /bin/bash + +help: + @echo 'Available Commands:' + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":"}; { if ($$3 == "") { printf " - \033[36m%-18s\033[0m %s\n", $$1, $$2 } else { printf " - \033[36m%-18s\033[0m %s\n", $$2, $$3 }}' + +cluster-template: ## Kops update the cluster template only + @./cluster-template.sh + +cluster-update: ## Kops update cluster state from cluster manifest + @./cluster-update.sh + +cluster-get: ## Kops get cluster info + @source cluster-config.sh && kops get all $$CLUSTER_NAME \ + --state $$ClUSTER_STATE + +cluster-destroy-yes: ## Kops destroy cluster + source cluster-config.sh && kops delete cluster \ + --state $$ClUSTER_STATE \ + --name $$CLUSTER_NAME \ + --unregister \ + --yes + +kops-cmd: ## Kops custom cmd, eg make KOPS_CMD="get secrets" kops-cmd + @source cluster-config.sh && kops ${KOPS_CMD} \ + --state $$ClUSTER_STATE \ + --name $$CLUSTER_NAME + +kops-kubeconfig: ## Kops export kubeconfig + source cluster-config.sh && kops export kubeconfig $$CLUSTER_NAME ${KOPS_CMD} \ + --state $$ClUSTER_STATE --kubeconfig $$CLUSTER_NAME --admin + +cluster-validate: ## Kops validate cluster against the current state + @source cluster-config.sh && kops validate cluster \ + --state $$ClUSTER_STATE \ + --name $$CLUSTER_NAME + +cluster-rolling-update: ## Kops perform a rolling update on the cluster -- only dry run, no changes will be applied + @source cluster-config.sh && kops rolling-update cluster \ + --state $$ClUSTER_STATE \ + --name $$CLUSTER_NAME + +cluster-rolling-update-yes: ## Kops perform a rolling update on the cluster + @source cluster-config.sh && kops rolling-update 
cluster \ + --state $$ClUSTER_STATE \ + --name $$CLUSTER_NAME \ + --yes + +# ######################### +# +# +#cluster-rolling-update-yes-force-masters: ## Kops perform a rolling update on the cluster masters +# source cluster-config.sh && kops rolling-update cluster \ +# --state $$ClUSTER_STATE \ +# --name $$CLUSTER_NAME \ +# --instance-group-roles=Master \ +# --yes \ +# --force +# +#cluster-destroy: ## Kops destroy cluster -- only dry run, no changes will be applied +# source cluster-config.sh && kops delete cluster \ +# --state $$ClUSTER_STATE \ +# --name $$CLUSTER_NAME \ +# --unregister +# +#cluster-destroy-yes: ## Kops destroy cluster +# source cluster-config.sh && kops delete cluster \ +# --state $$ClUSTER_STATE \ +# --name $$CLUSTER_NAME \ +# --unregister \ +# --yes +# diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-config.sh b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-config.sh new file mode 100755 index 000000000..1693de902 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-config.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -e -o pipefail + +# +# Pre-requisites validation +# +KOPS_VER="1.28.4" + +if [[ $(kops version | grep ${KOPS_VER}) == *${KOPS_VER}* ]] ; then + echo "Kops Version ${KOPS_VER}" +else + echo "ERROR: Kops Version ${KOPS_VER} binary not found in PATH, or is not executable" + echo "" + echo "To download locally, run:" + echo "curl -o kops -LO https://github.com/kubernetes/kops/releases/download/${KOPS_VER}/kops-linux-amd64" + echo "chmod +x ./kops" + echo "sudo mv ./kops /usr/local/bin/" + echo "" + exit 1 +fi + +# +# Get terraform output and parse terraform output values +TF_OUTPUT=$(cd ../1-prerequisites/ && leverage tf output -json | sed -E '/(^\[[0-9]|^ +INFO)/d') +VALUES_FILE="values.yaml" +echo ${TF_OUTPUT} > values.yaml +PROJECT_SHORT=$(echo ${TF_OUTPUT} | jq -r '.project_short.value') +ENV=$(echo ${TF_OUTPUT} | jq -r '.environment.value') +CLUSTER_TEMPLATE="cluster-template.yml" +CLUSTER_FILE="cluster.yml" 
+SSH_PUBLIC_KEY=$(echo ${TF_OUTPUT} | jq -r '.ssh_pub_key_path.value') +CLUSTER_NAME="$(echo ${TF_OUTPUT} | jq -r '.cluster_name.value')" +# kops S3 Bucket state to get the imported cluster.yaml definition +ClUSTER_STATE="s3://$(echo ${TF_OUTPUT} | jq -r '.kops_s3_bucket.value')" + +# +# Export a kubecfg file for a cluster from the state store. +# The configuration will be saved into a users $HOME/.kube/config file. +# To export the kubectl configuration to a specific file set the +# KUBECONFIG environment variable. +# +CLUSTER_KUBECONFIG="${HOME}/.kube/${PROJECT_SHORT}/${CLUSTER_NAME}" +export KUBECONFIG="${CLUSTER_KUBECONFIG}" + +# Export AWS credentials for kops to use them +export AWS_SDK_LOAD_CONFIG=1 +export AWS_SHARED_CREDENTIALS_FILE="${HOME}/.aws/${PROJECT_SHORT}/credentials" +export AWS_CONFIG_FILE="${HOME}/.aws/${PROJECT_SHORT}/config" +export AWS_PROFILE="$(echo ${TF_OUTPUT} | jq -r '.profile.value')" +export AWS_REGION="$(echo ${TF_OUTPUT} | jq -r '.region.value')" diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.sh b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.sh new file mode 100755 index 000000000..1e0baff29 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -e -o pipefail + +# +# Import common cluster config +# +source cluster-config.sh + +# +# Create/update the kops manifest from terraform output values +# +kops toolbox template \ + --name ${CLUSTER_NAME} \ + --values ${VALUES_FILE} \ + --template ${CLUSTER_TEMPLATE} \ + --format-yaml > ${CLUSTER_FILE} + +echo "You can see the final template for cluster ${CLUSTER_NAME} in file ${CLUSTER_FILE}." 
diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.yml b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.yml new file mode 100644 index 000000000..fdc3d9e1d --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-template.yml @@ -0,0 +1,207 @@ +# Ref1: https://github.com/kubernetes/kops/blob/master/docs/cluster_spec.md +# Ref2: https://github.com/kubernetes/kops/blob/master/docs/manifests_and_customizing_via_api.md +apiVersion: kops/v1alpha2 +kind: Cluster +metadata: + name: {{ .cluster_name.value }} +spec: + api: + # + # When configuring a LoadBalancer, you can also choose to have a public ELB + # or an internal (VPC only) ELB. The type field should be Public or Internal. + # + loadBalancer: + type: Internal + class : Network + additionalSecurityGroups: [ {{ .cluster_api_elb_extra_security_group.value }} ] + authentication: + aws: + backendMode: CRD + clusterId: {{ .cluster_name.value }} + identityMappings: + - arn: {{ .devops_role.value }} + username: admin:{{"{{"}} SessionName {{"}}"}} + groups: + - system:masters + authorization: + rbac: {} + channel: stable + cloudLabels: + Environment: {{ .environment.value }} + Provisioner: kops + Service: kubernetes + Backup: "True" + cloudProvider: aws + configBase: s3://{{ .kops_s3_bucket.value }}/{{ .cluster_name.value }} + {{ if not .gossip_cluster }} + dnsZone: {{ .hosted_zone_id.value }} + {{ end }} + # + # This block contains configurations for kube-dns. + # + kubeDNS: + provider: KubeDNS + # + # Define etcd members (as many as masters were defined) + # + etcdClusters: + - etcdMembers: + {{ range $i, $az := .cluster_master_azs.value }} + - instanceGroup: master-{{ . }} + name: {{ . | replace $.region.value "" }} + encryptedVolume: true + {{ end }} + name: main + version: {{ .etcd_clusters_version.value }} + - etcdMembers: + {{ range $i, $az := .cluster_master_azs.value }} + - instanceGroup: master-{{ . }} + name: {{ . 
| replace $.region.value "" }} + encryptedVolume: true + {{ end }} + name: events + version: {{ .etcd_clusters_version.value }} + # + # This array configures the CIDRs that are able to access the kubernetes API. + # On AWS this is manifested as inbound security group rules on the ELB or + # master security groups. + # + kubernetesApiAccess: + - {{ .shared_vpc_cidr_block.value }} + kubernetesVersion: {{ .cluster_version.value }} + masterInternalName: api.internal.{{ .cluster_name.value }} + masterPublicName: api.{{ .cluster_name.value }} + networkCIDR: {{ .vpc_cidr_block.value }} + networkID: {{ .vpc_id.value }} + # + # Ref1: https://github.com/kubernetes/kops/blob/master/docs/networking.md + # + networking: + calico: + majorVersion: {{ .networking_calico_major_version.value }} + nonMasqueradeCIDR: 100.64.0.0/10 + # + # This array configures the CIDRs that are able to ssh into nodes. On AWS this + # is manifested as inbound security group rules on the nodes and master + # security groups. 
+ # + sshAccess: + - {{ .shared_vpc_cidr_block.value }} + subnets: + # + # Define all public (utility) subnets that should be available for the cluster + # + {{ range $az, $id := .public_subnet_ids.value }} + - id: {{ $id }} + name: utility-{{ $az }} + type: Utility + zone: {{ $az }} + {{ end }} + # + # Define all private subnets that should be available for the cluster + # + {{ range $az, $id := .private_subnet_ids.value }} + - id: {{ $id }} + name: {{ $az }} + type: Private + zone: {{ $az }} + egress: {{ index $.nat_gateway_ids.value $az }} + {{ end }} + # + # Cluster Topology + # + topology: + dns: + type: Private + masters: private + nodes: private + metricsServer: + enabled: true + insecure: true + clusterAutoscaler: + enabled: true + {{ if .irsa_enabled.value }} + serviceAccountIssuerDiscovery: + discoveryStore: s3://{{ .irsa_bucket_name.value }} + enableAWSOIDCProvider: true + {{ end }} + {{ if .karpenter_enabled.value }} + karpenter: + enabled: true + {{ end }} + + # + # Access Mgmt + # + iam: + allowContainerRegistry: true + {{ if .irsa_enabled.value }} + useServiceAccountExternalPermissions: true + {{ end }} + kubelet: + anonymousAuth: false +--- + +# +# Create as many master nodes as were defined +# +{{ range .cluster_master_azs.value }} +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + labels: + kops.k8s.io/cluster: {{ $.cluster_name.value }} + name: master-{{ . }} +spec: + image: {{ $.kops_ami_id.value }} + kubernetesVersion: {{ $.cluster_version.value }} + machineType: {{ $.kops_master_machine_type.value }} + maxSize: {{ $.kops_master_machine_max_size.value }} + minSize: {{ $.kops_master_machine_min_size.value }} + role: Master + subnets: + - {{ . 
}} +--- + {{ end }} + +# +# Instance group (workers) are defined below, starting with a single group +# +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + labels: + kops.k8s.io/cluster: {{ .cluster_name.value }} + name: nodes +spec: + {{ if .karpenter_enabled.value }} + manager: Karpenter + {{ end }} + {{ if .node_cloud_labels.value }} + cloudLabels: + {{ range $name, $value := .node_cloud_labels.value }} + {{ $name }}: "{{ $value }}" + {{ end }} + {{ end }} + image: {{ $.kops_ami_id.value }} + kubernetesVersion: {{ .cluster_version.value }} + + machineType: {{ $.kops_worker_machine_type.value }} + + {{ if .karpenter_enabled.value }} + mixedInstancesPolicy: + instances: + {{ range $.kops_worker_machine_types_karpenter.value }} + - {{ . }} + {{ end }} + #onDemandAboveBase: 5 + #spotAllocationStrategy: price-capacity-optimized + {{ end }} + + maxSize: {{ $.kops_worker_machine_max_size.value }} + minSize: {{ $.kops_worker_machine_min_size.value }} + role: Node + subnets: + {{ range .availability_zones.value }} + - {{ . }} + {{ end }} diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-update.sh b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-update.sh new file mode 100755 index 000000000..b61c88e07 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/cluster-update.sh @@ -0,0 +1,74 @@ +#!/bin/bash +set -e -o pipefail + +# +# Import common cluster config +# +echo "Sourcing..." +source cluster-config.sh + +# +# Create/update the kops manifest from terraform output values +# +echo "Creating template..." +kops toolbox template \ + --name ${CLUSTER_NAME} \ + --values ${VALUES_FILE} \ + --template ${CLUSTER_TEMPLATE} \ + --format-yaml > ${CLUSTER_FILE} + +set -x + +# +# Import the cluster manifest into kops S3 remote state +# +echo "Importing manifest into S3..." 
+kops replace \ + -f ${CLUSTER_FILE} \ + --state ${ClUSTER_STATE} \ + --name ${CLUSTER_NAME} \ + --force + +# +# Create SSH public key (this is only needed the 1st time but it won't break if ran again) +# +echo "Creating the secret..." +kops create \ + --state ${ClUSTER_STATE} \ + --name ${CLUSTER_NAME} \ + sshpublickey \ + --ssh-public-key ${SSH_PUBLIC_KEY} + +# +# Generate the cluster in terraform format +# +echo "Generating terraform templates..." +kops update cluster \ + --target terraform \ + --state ${ClUSTER_STATE} \ + --name ${CLUSTER_NAME} \ + --create-kube-config=true \ + --out . + +# +# Remove AWS provider block from the generated kubernetes.tf.example as it is already declared in config.tf +# +echo "Processing file..." +input="kubernetes.tf" + +remove_block() { + local block_name="$1" + local start_pattern="$2" + local end_pattern="$3" + + awk -v start="$start_pattern" -v end="$end_pattern" ' + $0 ~ start { inside_block = 1; next } + !inside_block { print } + $0 ~ end { inside_block = 0 } + ' "$block_name" +} + +remove_block ${input} "^provider \"aws\" {" "^}" > kubernetes.tf.bak +mv kubernetes.tf.bak kubernetes.tf +remove_block ${input} "^terraform {" "^}" > kubernetes.tf.bak +mv kubernetes.tf.bak kubernetes.tf diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/config.tf b/apps-devstg/us-east-1/k8s-kops/2-kops/config.tf new file mode 100644 index 000000000..480f34861 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/config.tf @@ -0,0 +1,33 @@ +# Variables +variable "profile" { + description = "AWS Profile" +} + +variable "region" { + description = "AWS Region" +} + +# AWS Provider +provider "aws" { + region = "ca-central-1" + profile = var.profile +} + +provider "aws" { + alias = "files" + region = "ca-central-1" + profile = var.profile +} + +# Backend Config (partial) +terraform { + required_version = "~> 1.3" + + required_providers { + aws = "~> 4.10" + } + + backend "s3" { + key = "apps-devstg/ca-central-1/k8s-kops/2-kops/terraform.tfstate" 
+ } +} diff --git a/apps-devstg/us-east-1/k8s-kops/2-kops/values.yaml b/apps-devstg/us-east-1/k8s-kops/2-kops/values.yaml new file mode 100644 index 000000000..b2446e8de --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/2-kops/values.yaml @@ -0,0 +1 @@ +{ "availability_zones": { "sensitive": false, "type": [ "tuple", [ "string", "string" ] ], "value": [ "ca-central-1a", "ca-central-1b" ] }, "cluster_api_elb_extra_security_group": { "sensitive": false, "type": "string", "value": "" }, "cluster_master_azs": { "sensitive": false, "type": [ "tuple", [ "string" ] ], "value": [ "ca-central-1a" ] }, "cluster_name": { "sensitive": false, "type": "string", "value": "canada01-kops.devstg.k8s.local" }, "cluster_version": { "sensitive": false, "type": "string", "value": "1.28.9" }, "devops_role": { "sensitive": false, "type": "string", "value": "arn:aws:iam::975049917749:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DevOps_551c774bdd01c716" }, "environment": { "sensitive": false, "type": "string", "value": "apps-devstg" }, "etcd_clusters_version": { "sensitive": false, "type": "string", "value": "3.5.9" }, "gossip_cluster": { "sensitive": false, "type": "bool", "value": true }, "irsa_bucket_name": { "sensitive": false, "type": "string", "value": "base-irsa-canada01-kops-devstg-k8s-local" }, "irsa_enabled": { "sensitive": false, "type": "bool", "value": true }, "karpenter_enabled": { "sensitive": false, "type": "bool", "value": true }, "kops_ami_id": { "sensitive": false, "type": "string", "value": "ami-04fea581fe25e2675" }, "kops_master_machine_max_size": { "sensitive": false, "type": "number", "value": 1 }, "kops_master_machine_min_size": { "sensitive": false, "type": "number", "value": 1 }, "kops_master_machine_type": { "sensitive": false, "type": "string", "value": "t3.medium" }, "kops_s3_bucket": { "sensitive": false, "type": "string", "value": "base-state-canada01-kops-devstg-k8s-local" }, "kops_worker_machine_max_size": { "sensitive": false, "type": "number", 
"value": 5 }, "kops_worker_machine_min_size": { "sensitive": false, "type": "number", "value": 1 }, "kops_worker_machine_type": { "sensitive": false, "type": "string", "value": "t3.medium" }, "kops_worker_machine_types_karpenter": { "sensitive": false, "type": [ "tuple", [ "string", "string", "string", "string", "string", "string", "string" ] ], "value": [ "t2.medium", "t2.large", "t3.medium", "t3.large", "t3a.medium", "t3a.large", "m4.large" ] }, "nat_gateway_ids": { "sensitive": false, "type": [ "object", { "ca-central-1a": "string", "ca-central-1b": "string" } ], "value": { "ca-central-1a": "nat-0e7fe4387fc5f2224", "ca-central-1b": "nat-0e7fe4387fc5f2224" } }, "networking_calico_major_version": { "sensitive": false, "type": "string", "value": "v3" }, "node_cloud_labels": { "sensitive": false, "type": [ "object", {} ], "value": {} }, "private_subnet_ids": { "sensitive": false, "type": [ "object", { "ca-central-1a": "string", "ca-central-1b": "string" } ], "value": { "ca-central-1a": "subnet-017e46b6703e2d2c7", "ca-central-1b": "subnet-06bd06b2ced66ec68" } }, "profile": { "sensitive": false, "type": "string", "value": "base-apps-devstg-devops" }, "project_short": { "sensitive": false, "type": "string", "value": "base" }, "public_subnet_ids": { "sensitive": false, "type": [ "object", { "ca-central-1a": "string", "ca-central-1b": "string" } ], "value": { "ca-central-1a": "subnet-06e35d1822bf99077", "ca-central-1b": "subnet-04d9a10530b95edf6" } }, "region": { "sensitive": false, "type": "string", "value": "us-east-1" }, "shared_vpc_cidr_block": { "sensitive": false, "type": "string", "value": "172.18.0.0/20" }, "ssh_pub_key_path": { "sensitive": false, "type": "string", "value": "/home/jdelacamara/Dev/work/J_and_J/security/aws-instances-ssh-keys/aws_instances_binbash.pub" }, "vpc_cidr_block": { "sensitive": false, "type": "string", "value": "10.0.0.0/16" }, "vpc_id": { "sensitive": false, "type": "string", "value": "vpc-07945bbaa80ff6f89" } } diff --git 
a/apps-devstg/us-east-1/k8s-kops/3-extras/README.md b/apps-devstg/us-east-1/k8s-kops/3-extras/README.md new file mode 100644 index 000000000..1427e87eb --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/README.md @@ -0,0 +1,3 @@ +Note for creating resources in the cluster, such as Helm releases, you must copy the KUBECONFIG you got in directory `2-kops` to this directory and update the name properly in the `config.tf` file for the Kubernetes and Helm providers. + +Also, if you have set a VPN server you must connect to it. diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/externaldns.yaml b/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/externaldns.yaml new file mode 100644 index 000000000..49fc312c0 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/externaldns.yaml @@ -0,0 +1,36 @@ +# When enabled, prints DNS record changes rather than actually performing them +dryRun: false +# K8s resources type to be observed for new DNS entries by ExternalDNS +sources: + - traefik-proxy +# DNS provider where the DNS records will be created +provider: aws +# Limit possible target zones by domain suffixes (optional) +domainFilters: + - ${filteredDomain} +# Limit possible target zones by zone id (optional) +zoneIdFilters: + - ${filteredZoneId} +# Modify how DNS records are synchronized between sources and providers (options: sync, upsert-only) +policy: sync +# Registry Type. 
Available types are: txt, noop +## ref: https://github.com/kubernetes-sigs/external-dns/blob/master/docs/proposal/registry.md +registry: txt +# TXT Registry Identifier +txtOwnerId: ${txtOwnerId} +# Filter sources managed by external-dns via annotation using label selector semantics (optional) +#annotationFilter: ${annotationFilter} + +# Adjust the interval for DNS updates +interval: 3m + +# AWS settings +aws: + zoneType: ${zoneType} + +# Create a service account and tie it to the external-dns role +#serviceAccount: +# create: true +# name: ${serviceAccountName} +# annotations: +# eks.amazonaws.com/role-arn: ${roleArn} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/traefik.yaml b/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/traefik.yaml new file mode 100644 index 000000000..b324ad10a --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/chart-values/traefik.yaml @@ -0,0 +1,54 @@ +service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +experimental: + plugins: + subfilter: + moduleName: github.com/DirtyCajunRice/subfilter + version: v0.5.0 +certResolvers: + letsencryptresolver: + # for challenge options cf. https://doc.traefik.io/traefik/https/acme/ + email: letsencrypt+costenginetool@binbash.co + #dnsChallenge: + # # also add the provider's required configuration under env + # # or expand then from secrets/configmaps with envfrom + # # cf. https://doc.traefik.io/traefik/https/acme/#providers + # provider: digitalocean + # # add futher options for the dns challenge as needed + # # cf. 
https://doc.traefik.io/traefik/https/acme/#dnschallenge + # delayBeforeCheck: 30 + # resolvers: + # - 1.1.1.1 + # - 8.8.8.8 + tlsChallenge: true + httpChallenge: + entryPoint: "web" + # It has to match the path with a persistent volume + storage: "/certs/costenginetool.binbash.co.json" +persistence: + enabled: true + name: certs + # existingClaim: "" + accessMode: ReadWriteOnce + size: 128Mi + # storageClass: "" + # volumeName: "" + path: /certs + annotations: {} + # -- Only mount a subpath of the Volume into the pod + # subPath: "" +deployment: + initContainers: + # The "volume-permissions" init container is required if you run into permission issues. + # Related issue: https://github.com/traefik/traefik-helm-chart/issues/396 + - name: volume-permissions + image: busybox:latest + command: ["sh", "-c", "touch /certs/costenginetool.binbash.co.json; chown 65532 /certs/costenginetool.binbash.co.json; chmod -v 600 /certs/costenginetool.binbash.co.json"] + securityContext: + runAsNonRoot: false + runAsGroup: 0 + runAsUser: 0 + volumeMounts: + - name: certs + mountPath: /certs diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/common-variables.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/common-variables.tf new file mode 120000 index 000000000..2f807a597 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/common-variables.tf @@ -0,0 +1 @@ +../../../../config/common-variables.tf \ No newline at end of file diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/config.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/config.tf new file mode 100644 index 000000000..ed6fd8882 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/config.tf @@ -0,0 +1,53 @@ + +#------------------------------------------------------------------------------ +# Providers +#------------------------------------------------------------------------------ +provider "aws" { + region = "ca-central-1" + profile = var.profile +} + +provider "aws" { + alias = "shared" + region = var.region + 
profile = "${var.project}-shared-devops" +} + +provider "kubernetes" { + config_path = "canada01-kops.devstg.k8s.local" + config_context = "canada01-kops.devstg.k8s.local" +} + +provider "helm" { + kubernetes { + config_path = "canada01-kops.devstg.k8s.local" + config_context = "canada01-kops.devstg.k8s.local" + } +} + +#------------------------------------------------------------------------------ +# Backend Config (partial) +#------------------------------------------------------------------------------ +terraform { + required_version = "~> 1.3" + + required_providers { + aws = "~> 4.10" + kubernetes = "~> 2.11" + helm = "~> 2.13" + } + backend "s3" { + key = "apps-devstg/ca-central-1/k8s-kops/3-extras/terraform.tfstate" + } +} + +data "terraform_remote_state" "shared-dns" { + backend = "s3" + + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/global/dns/binbash.co/terraform.tfstate" + } +} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/external-dns.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/external-dns.tf new file mode 100644 index 000000000..475e47404 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/external-dns.tf @@ -0,0 +1,31 @@ +#resource "kubernetes_namespace" "externaldns" { +# count = var.externaldns ? 1 : 0 +# +# metadata { +# labels = local.labels +# name = "externaldns" +# } +#} +# +#resource "helm_release" "externaldns_public" { +# count = var.externaldns ? 
1 : 0 +# +# name = "externaldns-public" +# namespace = kubernetes_namespace.externaldns[0].id +# repository = "https://charts.bitnami.com/bitnami" +# chart = "external-dns" +# version = "6.14.4" +# values = [ +# templatefile("chart-values/externaldns.yaml", { +# filteredDomain = "costenginetool.binbash.co" +# filteredZoneId = data.terraform_remote_state.shared-dns.outputs.aws_public_zone_id[0] +# txtOwnerId = "${var.environment}-kops-pub" +# #annotationFilter = "kubernetes.io/ingress.class=${local.public_ingress_class}" +# zoneType = "public" +# serviceAccountName = "externaldns-public" +# #roleArn = data.terraform_remote_state.eks-identities.outputs.public_externaldns_role_arn +# }) +# ] +# +# depends_on = [kubernetes_namespace.externaldns] +#} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/locals.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/locals.tf new file mode 100644 index 000000000..0e85cc6d8 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/locals.tf @@ -0,0 +1,7 @@ +locals { + labels = { + environment = var.environment + "app.kubernetes.io/managed-by" = "Terraform" + "app.kubernetes.io/part-of" = var.environment + } +} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/route53record.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/route53record.tf new file mode 100644 index 000000000..dee6fafe3 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/route53record.tf @@ -0,0 +1,39 @@ +data "aws_lb" "traefik" { + count = var.traefik && var.create_route53_record ? 1 : 0 + + tags = { + "kubernetes.io/service-name": "traefik/traefik" + } + + depends_on = [resource.helm_release.traefik] +} + +resource "aws_route53_record" "traefik-faina" { + provider = aws.shared + + count = var.traefik && var.create_route53_record ? 
1 : 0 + + allow_overwrite = true + name = "subdomain.binbash.co" + records = [data.aws_lb.traefik[0].dns_name] + ttl = 3600 + type = "CNAME" + zone_id = data.terraform_remote_state.shared-dns.outputs.public_zone_id + + depends_on = [data.aws_lb.traefik] +} + +resource "aws_route53_record" "traefik-therecord" { + provider = aws.shared + + count = var.traefik && var.create_route53_record ? 1 : 0 + + allow_overwrite = true + name = "ca.therecord.binbash.co" + records = [data.aws_lb.traefik[0].dns_name] + ttl = 3600 + type = "CNAME" + zone_id = data.terraform_remote_state.shared-dns.outputs.public_zone_id + + depends_on = [data.aws_lb.traefik] +} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/terraform.auto.tfvars b/apps-devstg/us-east-1/k8s-kops/3-extras/terraform.auto.tfvars new file mode 100644 index 000000000..2936161cb --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/terraform.auto.tfvars @@ -0,0 +1,5 @@ +# INGRESS +traefik = true + +# EXTERNAL DNS +externaldns = false diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/traefik.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/traefik.tf new file mode 100644 index 000000000..cd81973a2 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/traefik.tf @@ -0,0 +1,19 @@ +resource "kubernetes_namespace" "traefik" { + count = var.traefik ? 1 : 0 + + metadata { + labels = local.labels + name = "traefik" + } +} +resource "helm_release" "traefik" { + count = var.traefik ? 
1 : 0 + name = "traefik" + namespace = kubernetes_namespace.traefik[0].id + repository = "https://traefik.github.io/charts" + chart = "traefik" + version = "v28.0.0" + values = [file("chart-values/traefik.yaml")] + + depends_on = [kubernetes_namespace.traefik] +} diff --git a/apps-devstg/us-east-1/k8s-kops/3-extras/variables.tf b/apps-devstg/us-east-1/k8s-kops/3-extras/variables.tf new file mode 100644 index 000000000..737667f1a --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/3-extras/variables.tf @@ -0,0 +1,15 @@ +variable "traefik" { + description = "Enable Traefik" + type = bool +} + +variable "create_route53_record" { + description = "Whether to create the Route53 record" + type = bool + default = true +} + +variable "externaldns" { + description = "Enable ExternalDNS" + type = bool +} diff --git a/apps-devstg/us-east-1/k8s-kops/README.md b/apps-devstg/us-east-1/k8s-kops/README.md new file mode 100644 index 000000000..f090ff82a --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/README.md @@ -0,0 +1,213 @@ +
+ binbash +
+
+ leverage +
+ +# Reference Architecture: Terraform AWS Kubernetes Kops Cluster + +## Kops Pre-requisites + +To develop this Kops K8s Cluster you need a VPC (with private/public subnets and NAT gateway enabled) to create the cluster in. + +!!! Info + If you want Karpenter enabled the subnets on which the cluster will be deployed need to have these tags: + + ``` + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/cluster01-kops.devstg.k8s.local" = "true" + ``` + + Note you have to set your cluster name in the tag. + +### Overview + +K8s clusters provisioned by Kops have a number of resources that need to be available before the +cluster is created. These are Kops pre-requisites and they are defined in the `1-prerequisites` +directory which includes all Terraform files used to create/modify these resources. + +## Steps to create it + +1. Set up and apply prerequisites layers +2. In 2-kops + - create the cluster definition + - apply layer + - get the KUBECONFIG file +3. get the KUBECONFIG file, set up and apply the layer + +### 1 - prerequisites + +Edit the `locals.tf` file. + +Edit the cluster name: + +``` hcl + base_domain_name = "k8s.local" + k8s_cluster_name = "cluster01-kops.${local.short_environment}.${local.base_domain_name}" +``` + +Note the `k8s.local` base domain will force the creation of a "gossip cluster", i.e. a private cluster. + +In this case you'll need to be inside the VPN to reach the API and will need to create a LB to access your apps.
+ +Set the type and number of master and worker nodes: + +``` hcl + # K8s Kops Master Nodes Machine (EC2) type and size + ASG Min-Max per AZ + # then min/max = 1 will create 1 Master Node x AZ => 3 x Masters + kops_master_machine_type = "t3.medium" + kops_master_machine_max_size = 1 + kops_master_machine_min_size = 1 + + # K8s Kops Worker Nodes Machine (EC2) type and size + ASG Min-Max + kops_worker_machine_type = "t3.medium" + kops_worker_machine_max_size = 5 + kops_worker_machine_min_size = 1 + # If you use Karpenter set the list of types here + kops_worker_machine_types_karpenter = ["t2.medium", "t2.large", "t3.medium", "t3.large", "t3a.medium", "t3a.large", "m4.large"] +``` + +Note the last variable is for Karpenter. If Karpenter is enabled the `kops_worker_machine_*` are not used. + +Set the number of AZs on which the master nodes will be spread: + +``` hcl + number_of_cluster_master_azs = 1 +``` + +Then, go over the other variables and set them as per your needs. + +Apply it as usual: + +``` shell +leverage tf apply +``` + +#### Note on VPCs + +In the `config.tf` file you'll find: + +``` hcl +data "terraform_remote_state" "vpc" { + backend = "s3" + + config = { + region = var.region + profile = var.profile + bucket = var.bucket + key = "${var.environment}/ca-central-1/kops-network/terraform.tfstate" + } +} + +data "terraform_remote_state" "vpc-shared" { + backend = "s3" + + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/us-east-1/network/terraform.tfstate" + } +} +``` + +These are the remote states for the "vpc", the vpc in which the cluster will be created, and the "vpc-shared", the vpc in which the VPN Server is working, so we can accept connections from there. + + +#### Note on Karpenter + +To use Karpenter you need to create a service linked role so spot can be used.
+ + +```shell +aws iam create-service-linked-role --aws-service-name spot.amazonaws.com +``` + +### 2 - kops + +Create the cluster definition: + +``` shell +make cluster-update +``` + +Apply the layer: + +``` shell +leverage tf apply +``` + +Get the Kubeconfig: + +``` shell +make kops-kubeconfig +``` + +The KUBECONFIG will be saved to a file named same as the cluster. E.g. `cluster01-kops.devstg.k8s.local`. + +So you can: + +``` shell +export KUBECONFIG=/path/to/your/file/cluster01-kops.devstg.k8s.local +``` + +...and once the cluster is up and running (and you are in the VPN), you'll be able to access the cluster! + +### 3 - extras + +You have to copy the KUBECONFIG file from the previous step and set the name in the `config.tf` file: + +``` hcl +provider "kubernetes" { + config_path = "cluster01-kops.devstg.k8s.local" + config_context = "cluster01-kops.devstg.k8s.local" +} + +provider "helm" { + kubernetes { + config_path = "cluster01-kops.devstg.k8s.local" + config_context = "cluster01-kops.devstg.k8s.local" + } +} +``` + +Now set and apply the layer. + +``` shell +leverage tf apply +``` + +Note Traefik is set as default. It will create a public LB. + +Also note a file called `route53record.tf` will create a record in Route53 pointing to the LB. 
+ +Since this is set using the standard Leverage setup, you'll find in the `config.tf` file the remote state for the DNS: + +``` hcl +data "terraform_remote_state" "shared-dns" { + backend = "s3" + + config = { + region = var.region + profile = "${var.project}-shared-devops" + bucket = "${var.project}-shared-terraform-backend" + key = "shared/global/dns/binbash.co/terraform.tfstate" + } +} +``` + + +## Leverage Documentation + +- **Binbash Leverage Cookbook** + - [K8s KOPS Cookbook](https://leverage.binbash.co/user-guide/cookbooks/k8s/) +- **How it works** + - [Overview](https://leverage.binbash.com.ar/how-it-works/compute/overview/) + - [K8s Kops](https://leverage.binbash.com.ar/how-it-works/compute/k8s-kops/) +- **User guide** + 1. [Configurations](https://leverage.binbash.com.ar/user-guide/base-configuration/repo-le-tf-infra-aws/) + 2. [Workflow](https://leverage.binbash.com.ar/user-guide/base-workflow/repo-le-tf-infra-aws/) + 3. [K8s Kops](https://leverage.binbash.com.ar/user-guide/compute/k8s-kops/) diff --git a/apps-devstg/us-east-1/k8s-kops/TODO.md b/apps-devstg/us-east-1/k8s-kops/TODO.md new file mode 100644 index 000000000..ba36aa4c1 --- /dev/null +++ b/apps-devstg/us-east-1/k8s-kops/TODO.md @@ -0,0 +1,4 @@ +# TO DO + +- Install external-dns +