[installer] Add EKS installer test
nandajavarma committed Jun 16, 2022
1 parent ba85e5b commit b9828ea
Showing 8 changed files with 485 additions and 10 deletions.
85 changes: 85 additions & 0 deletions .werft/eks-installer-tests.yaml
@@ -0,0 +1,85 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/eks-installer-tests.yaml -a debug=true`
pod:
serviceAccount: werft
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: dev/workload
operator: In
values:
- "builds"
securityContext:
runAsUser: 0
volumes:
- name: sh-playground-sa-perm
secret:
secretName: sh-playground-sa-perm
- name: sh-playground-dns-perm
secret:
secretName: sh-playground-dns-perm
- name: sh-aks-perm
secret:
secretName: aks-credentials
containers:
- name: nightly-test
image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:cw-werft-cred.0
workingDir: /workspace
imagePullPolicy: Always
volumeMounts:
- name: sh-playground-sa-perm
mountPath: /mnt/secrets/sh-playground-sa-perm
    - name: sh-playground-dns-perm # this SA is used for DNS management
mountPath: /mnt/secrets/sh-playground-dns-perm
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-access-key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-secret-key
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-region
- name: WERFT_HOST
value: "werft.werft.svc.cluster.local:7777"
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: WERFT_K8S_NAMESPACE
value: "werft"
- name: WERFT_K8S_LABEL
value: "component=werft"
- name: TF_VAR_sa_creds
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: TF_VAR_dns_sa_creds
value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- bash
- -c
- |
sleep 1
set -Eeuo pipefail
sudo chown -R gitpod:gitpod /workspace
      sudo apt update && sudo apt install -y gettext-base
export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)
(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"
# The bit below makes this a cron job
# plugins:
# cron: "15 3 * * *"
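Terraform maps environment variables prefixed with TF_VAR_ to input variables, so the TF_VAR_TEST_ID exported in the script above surfaces as var.TEST_ID inside the Terraform code. A minimal sketch of the consuming side (the declaration and the cluster-name derivation below are assumptions about the eks module, which this diff does not show):

variable "TEST_ID" {
  type        = string
  description = "Random five-character suffix that namespaces the resources of a single test run"
}

# e.g. derive a per-run cluster name from the suffix
locals {
  cluster_name = "gitpod-eks-${var.TEST_ID}"
}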
22 changes: 22 additions & 0 deletions .werft/installer-tests.ts
Original file line number Diff line number Diff line change
@@ -39,6 +39,11 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
makeTarget: "aks-standard-cluster",
description: "Creating an aks cluster(azure)",
},
STANDARD_EKS_CLUSTER: {
phase: "create-std-eks-cluster",
makeTarget: "eks-standard-cluster",
description: "Creating a EKS cluster with 1 nodepool each for workspace and server",
},
CERT_MANAGER: {
phase: "setup-cert-manager",
makeTarget: "cert-manager",
@@ -174,6 +179,23 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
"DESTROY",
],
},
STANDARD_EKS_TEST: {
DESCRIPTION: "Create an EKS cluster",
PHASES: [
"STANDARD_EKS_CLUSTER",
"CERT_MANAGER",
            // TODO phases are:
            // 1) register domains in AWS, associate them with route53
            // 2) add the associated NS record to GCP (since we use the gitpod-self-hosted.com domain)
            // 3) create a cluster issuer with route53 as the solver
            //    (see the DNS-delegation sketch after this diff)
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
// "CHECK_INSTALLATION",
// "RUN_INTEGRATION_TESTS",
"RESULTS",
"DESTROY",
],
},
STANDARD_K3S_PREVIEW: {
DESCRIPTION: "Create a SH Gitpod preview environment on a K3s cluster, created on a GCP instance",
PHASES: [
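The TODO in STANDARD_EKS_TEST above boils down to DNS plumbing: steps 1 and 2 create a route53 hosted zone for the per-test subdomain and delegate it from the parent zone managed in GCP Cloud DNS, and step 3 would add a cert-manager cluster issuer that uses route53 as its DNS01 solver. A minimal Terraform sketch of the delegation, where var.TEST_ID, the zone names and the GCP managed-zone name are assumptions rather than part of this commit:

resource "aws_route53_zone" "test" {
  name = "${var.TEST_ID}.gitpod-self-hosted.com"
}

# Delegate the subdomain by publishing its route53 name servers as an NS
# record in the GCP-managed parent zone.
resource "google_dns_record_set" "ns_delegation" {
  managed_zone = "gitpod-self-hosted-com" # assumed name of the parent zone in GCP
  name         = "${var.TEST_ID}.gitpod-self-hosted.com."
  type         = "NS"
  ttl          = 300
  rrdatas      = [for ns in aws_route53_zone.test.name_servers : "${ns}."]
}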
232 changes: 232 additions & 0 deletions install/infra/terraform/eks/kubernetes.tf
@@ -0,0 +1,232 @@
terraform {
required_providers {
kubectl = {
source = "gavinbunney/kubectl"
version = ">= 1.7.0"
}
aws = {
      version = "~> 3.0"
source = "registry.terraform.io/hashicorp/aws"
}
}
}

resource "aws_iam_role" "eks_cluster" {
depends_on = [data.aws_subnet_ids.subnet_ids]
name = "iam-${var.cluster_name}"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks_cluster.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.eks_cluster.name
}

resource "aws_eks_cluster" "aws_eks" {
name = var.cluster_name
role_arn = aws_iam_role.eks_cluster.arn

vpc_config {
subnet_ids = data.aws_subnet_ids.subnet_ids.ids
}

tags = {
Name = "EKS_tuto"
}

depends_on = [
aws_iam_role.eks_cluster,
]
}

data "aws_eks_cluster" "cluster" {
depends_on = [
aws_eks_cluster.aws_eks,
]
name = resource.aws_eks_cluster.aws_eks.id
}

data "aws_eks_cluster_auth" "cluster" {
depends_on = [
aws_eks_cluster.aws_eks,
]
name = resource.aws_eks_cluster.aws_eks.id
}


resource "aws_iam_role" "eks_nodes" {
name = "iam-ng-${var.cluster_name}"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.eks_nodes.name
}

locals {
map_roles = <<ROLES
- rolearn: ${aws_iam_role.eks_nodes.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
ROLES
}
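# Note: the map_roles value above is formatted as a mapRoles entry for the
# aws-auth ConfigMap in kube-system, which controls which IAM roles may access
# the cluster, but it is not applied anywhere in this file (EKS adds the roles
# of managed node groups to aws-auth automatically). If it were needed, e.g.
# for self-managed nodes, applying it via the kubectl provider configured at
# the bottom of this file could look roughly like the commented sketch below
# (illustrative only, not part of this commit):
#
# resource "kubectl_manifest" "aws_auth" {
#   yaml_body = yamlencode({
#     apiVersion = "v1"
#     kind       = "ConfigMap"
#     metadata   = { name = "aws-auth", namespace = "kube-system" }
#     data       = { mapRoles = local.map_roles }
#   })
# }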

resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" {
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
role = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" {
policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds"
role = aws_iam_role.eks_nodes.name
}

resource "aws_launch_template" "eks" {
name = "${var.cluster_name}-template"
update_default_version = true
block_device_mappings {
device_name = "/dev/sda1"
ebs {
volume_size = 100
}
}
credit_specification {
cpu_credits = "standard"
}
ebs_optimized = true
  # AMI generated with Packer (the image is private)
image_id = "ami-0f08b4b1a4fd3ebe3"
network_interfaces {
associate_public_ip_address = false
}
}

resource "aws_eks_node_group" "workspace" {
cluster_name = aws_eks_cluster.aws_eks.name
node_group_name = "ngw-${var.cluster_name}"
node_role_arn = aws_iam_role.eks_nodes.arn
subnet_ids = data.aws_subnet_ids.subnet_ids.ids
instance_types = ["m6i.2xlarge"]
labels = {
"gitpod.io/workload_workspace_services" = true
"gitpod.io/workload_workspace_regular" = true
"gitpod.io/workload_workspace_headless" = true
}

scaling_config {
desired_size = 1
max_size = 10
min_size = 1
}

# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
resource.aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore,
resource.aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
resource.aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilderECRContainerBuilds,
]

launch_template {
id = resource.aws_launch_template.eks.id
version = aws_launch_template.eks.latest_version
}
}

resource "aws_eks_node_group" "services" {
cluster_name = aws_eks_cluster.aws_eks.name
node_group_name = "ngs-${var.cluster_name}"
node_role_arn = aws_iam_role.eks_nodes.arn
subnet_ids = data.aws_subnet_ids.subnet_ids.ids
instance_types = ["m6i.xlarge"]
labels = {
"gitpod.io/workload_meta" = true
"gitpod.io/workload_ide" = true
}

scaling_config {
desired_size = 1
max_size = 10
min_size = 1
}

# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
resource.aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore,
resource.aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
resource.aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilderECRContainerBuilds,
]

launch_template {
id = resource.aws_launch_template.eks.id
version = aws_launch_template.eks.latest_version
}
}

provider "kubectl" {
host = resource.aws_eks_cluster.aws_eks.endpoint
cluster_ca_certificate = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.cluster.token
config_path = var.kubeconfig
}

output "host" {
value = resource.aws_eks_cluster.aws_eks.endpoint
}

output "ca" {
sensitive = true
value = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
}

output "token" {
sensitive = true
value = data.aws_eks_cluster_auth.cluster.token
}
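The host, ca and token outputs are what a calling root module would feed into Kubernetes-aware providers to install workloads on the new cluster. A sketch of such a consumer, assuming the module is instantiated under the name "eks" (not shown in this diff):

provider "kubernetes" {
  host                   = module.eks.host
  cluster_ca_certificate = module.eks.ca
  token                  = module.eks.token
}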