diff --git a/ci/infra/aws/ami.tf b/ci/infra/aws/ami.tf deleted file mode 100644 index 7a3f5e5f29..0000000000 --- a/ci/infra/aws/ami.tf +++ /dev/null @@ -1,10 +0,0 @@ -data "susepubliccloud_image_ids" "sles15sp2_chost_byos" { - cloud = "amazon" - region = data.aws_region.current.name - state = "active" - - # USE SLES 15 SP2 Container host AMI - this is needed to avoid issues like bsc#1146774 - name_regex = "suse-sles-15-sp2-chost-byos.*-hvm-ssd-x86_64" -} - -data "aws_region" "current" {} diff --git a/ci/infra/aws/cloud-init.tf b/ci/infra/aws/cloud-init.tf index dcbbe3441e..89ab6f1a15 100644 --- a/ci/infra/aws/cloud-init.tf +++ b/ci/infra/aws/cloud-init.tf @@ -1,25 +1,24 @@ -data "template_file" "register_rmt" { - template = file("cloud-init/register-rmt.tpl") - count = var.rmt_server_name == "" ? 0 : 1 +data "template_file" "register_scc" { + count = var.caasp_registry_code != "" && var.rmt_server_name == "" ? 1 : 0 + template = file("${path.module}/cloud-init/register-scc.tpl") vars = { - rmt_server_name = var.rmt_server_name + caasp_registry_code = var.caasp_registry_code } } -data "template_file" "register_scc" { - # register with SCC iff an RMT has not been provided - count = var.caasp_registry_code != "" && var.rmt_server_name == "" ? 1 : 0 - template = file("cloud-init/register-scc.tpl") +data "template_file" "register_rmt" { + count = var.rmt_server_name == "" ? 0 : 1 + template = file("${path.module}/cloud-init/register-rmt.tpl") vars = { - caasp_registry_code = var.caasp_registry_code + rmt_server_name = var.rmt_server_name } } data "template_file" "register_suma" { - template = file("cloud-init/register-suma.tpl") count = var.suma_server_name == "" ? 0 : 1 + template = file("${path.module}/cloud-init/register-suma.tpl") vars = { suma_server_name = var.suma_server_name @@ -28,17 +27,17 @@ data "template_file" "register_suma" { data "template_file" "repositories" { count = length(var.repositories) - template = file("cloud-init/repository.tpl") + template = file("${path.module}/cloud-init/repository.tpl") vars = { - repository_url = var.repositories[count.index] - repository_name = var.repositories[count.index] + repository_url = element(values(var.repositories), count.index) + repository_name = element(keys(var.repositories), count.index) } } data "template_file" "commands" { - template = file("cloud-init/commands.tpl") - count = length(var.packages) == 0 ? 0 : 1 + count = join("", var.packages) == "" ? 0 : 1 + template = file("${path.module}/cloud-init/commands.tpl") vars = { packages = join(", ", var.packages) @@ -46,25 +45,25 @@ data "template_file" "commands" { } data "template_file" "cloud-init" { - template = file("cloud-init/cloud-init.yaml.tpl") + template = file("${path.module}/cloud-init/cloud-init.yaml.tpl") vars = { authorized_keys = join("\n", formatlist(" - %s", var.authorized_keys)) - commands = join("\n", data.template_file.commands.*.rendered) - repositories = length(var.repositories) == 0 ? "\n" : join("\n", data.template_file.repositories.*.rendered) register_scc = var.caasp_registry_code != "" && var.rmt_server_name == "" ? join("\n", data.template_file.register_scc.*.rendered) : "" register_rmt = var.rmt_server_name != "" ? join("\n", data.template_file.register_rmt.*.rendered) : "" register_suma = var.suma_server_name != "" ? join("\n", data.template_file.register_suma.*.rendered) : "" + username = var.username + repositories = length(var.repositories) == 0 ? 
"\n" : join("\n", data.template_file.repositories.*.rendered) + commands = join("\n", data.template_file.commands.*.rendered) } } data "template_cloudinit_config" "cfg" { gzip = false - base64_encode = false + base64_encode = true part { content_type = "text/cloud-config" content = data.template_file.cloud-init.rendered } } - diff --git a/ci/infra/aws/cloud-init/cloud-init.yaml.tpl b/ci/infra/aws/cloud-init/cloud-init.yaml.tpl index 025cbd298c..79c999b83a 100644 --- a/ci/infra/aws/cloud-init/cloud-init.yaml.tpl +++ b/ci/infra/aws/cloud-init/cloud-init.yaml.tpl @@ -1,4 +1,18 @@ #cloud-config +# vim: syntax=yaml +# +# *********************** +# ---- for more examples look at: ------ +# ---> https://cloudinit.readthedocs.io/en/latest/topics/examples.html +# ---> https://www.terraform.io/docs/providers/template/d/cloudinit_config.html +# ****************************** +# +# This is the configuration syntax that the write_files module +# will know how to understand. encoding can be given b64 or gzip or (gz+b64). +# The content will be decoded accordingly and then written to the path that is +# provided. +# +# Note: Content strings here are truncated for example purposes. # set locale locale: en_US.UTF-8 @@ -6,9 +20,23 @@ locale: en_US.UTF-8 # set timezone timezone: Etc/UTC +users: + - name: ${username} + ssh-authorized-keys: + ${authorized_keys} + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + groups: sudo + shell: /bin/bash + +# Inject the public keys ssh_authorized_keys: ${authorized_keys} +# WARNING!!! Do not use cloud-init packages module when SUSE CaaSP Registration +# Code is provided. In this case, repositories will be added in runcmd module +# with SUSEConnect command after packages module is ran +#packages: + bootcmd: - ip link set dev eth0 mtu 1500 @@ -16,7 +44,7 @@ runcmd: ${register_scc} ${register_rmt} ${register_suma} +${repositories} ${commands} final_message: "The system is finally up, after $UPTIME seconds" - diff --git a/ci/infra/aws/cloud-init/commands.tpl b/ci/infra/aws/cloud-init/commands.tpl index 0576e2db54..9b1c51079f 100644 --- a/ci/infra/aws/cloud-init/commands.tpl +++ b/ci/infra/aws/cloud-init/commands.tpl @@ -1,6 +1,6 @@ - - echo "solver.onlyRequires = true" >> /etc/zypp/zypp.conf - - [ zypper, -n, install, ${packages} ] - - [ ip, link, delete, docker0 ] + - ip link delete docker0 - iptables -L | grep DOCKER | awk {'print $2'} | xargs -d "\n" -i iptables -X {} - iptables-save | awk '/^[*]/ { print $1 "\nCOMMIT" }' | iptables-restore - lsmod | egrep ^iptable_ | awk '{print $1}' | xargs -rd\\n modprobe -r + - echo "solver.onlyRequires = true" >> /etc/zypp/zypp.conf + - zypper -n install ${packages} diff --git a/ci/infra/aws/cloud-init/register-rmt.tpl b/ci/infra/aws/cloud-init/register-rmt.tpl index 7752d4b009..87e34b1d28 100644 --- a/ci/infra/aws/cloud-init/register-rmt.tpl +++ b/ci/infra/aws/cloud-init/register-rmt.tpl @@ -1,4 +1,4 @@ - curl --tlsv1.2 --silent --insecure --connect-timeout 10 https://${rmt_server_name}/rmt.crt --output /etc/pki/trust/anchors/rmt-server.pem && /usr/sbin/update-ca-certificates &> /dev/null - SUSEConnect --url https://${rmt_server_name} - SUSEConnect -p sle-module-containers/15.2/x86_64 - - SUSEConnect -p caasp/4.5/x86_64 + - SUSEConnect -p caasp/4.5/x86_64 -r ${caasp_registry_code} diff --git a/ci/infra/aws/cloud-init/repository.tpl b/ci/infra/aws/cloud-init/repository.tpl index a52656619f..a581de1706 100644 --- a/ci/infra/aws/cloud-init/repository.tpl +++ b/ci/infra/aws/cloud-init/repository.tpl @@ -1,6 +1,2 @@ - - id: ${repository_name} - name: 
${repository_name} - baseurl: ${repository_url} - enabled: 1 - autorefresh: 1 - gpgcheck: 0 \ No newline at end of file + - zypper addrepo --refresh ${repository_url} ${repository_name} + - zypper --gpg-auto-import-keys refresh diff --git a/ci/infra/aws/container-openrc.sh b/ci/infra/aws/container-openrc.sh new file mode 100755 index 0000000000..2bbaf2ccec --- /dev/null +++ b/ci/infra/aws/container-openrc.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html + +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" diff --git a/ci/infra/aws/iam_policies.tf b/ci/infra/aws/iam.tf similarity index 99% rename from ci/infra/aws/iam_policies.tf rename to ci/infra/aws/iam.tf index a7333ea955..5f8dea6c7f 100644 --- a/ci/infra/aws/iam_policies.tf +++ b/ci/infra/aws/iam.tf @@ -7,16 +7,16 @@ locals { } resource "aws_iam_instance_profile" "master" { + count = length(var.iam_profile_master) == 0 ? 1 : 0 name = local.aws_iam_instance_profile_master_terraform role = aws_iam_role.master[count.index].name - count = length(var.iam_profile_master) == 0 ? 1 : 0 } resource "aws_iam_role" "master" { + count = length(var.iam_profile_master) == 0 ? 1 : 0 name = local.aws_iam_instance_profile_master_terraform description = "IAM role needed by CPI on master nodes" path = "/" - count = length(var.iam_profile_master) == 0 ? 1 : 0 assume_role_policy = <` here, + # this tag cannot be added to all our resources otherwise the CPI + # will get confused when dealing with security rules objects. + basic_tags = merge( + { + "Name" = var.stack_name + "Environment" = var.stack_name + }, + var.tags, + ) + + # tags = local.basic_tags + tags = merge( + local.basic_tags, + { + format("kubernetes.io/cluster/%v", var.stack_name) = "SUSE-terraform" + }, + ) +} + +# https://www.terraform.io/docs/providers/aws/index.html +provider "aws" { + profile = "default" + region = var.aws_region +} + +data "susepubliccloud_image_ids" "sles15sp2_chost_byos" { + cloud = "amazon" + region = var.aws_region + state = "active" + + # USE SLES 15 SP2 Container host AMI - this is needed to avoid issues like bsc#1146774 + name_regex = "suse-sles-15-sp2-chost-byos.*-hvm-ssd-x86_64" +} + +resource "aws_key_pair" "kube" { + key_name = "${var.stack_name}-keypair" + public_key = element(var.authorized_keys, 0) + + tags = merge( + local.basic_tags, + { + "Name" = "${var.stack_name}-keypair" + "Class" = "KeyPair" + }, + ) +} + +# list of availability_zones which can be access from the current region +data "aws_availability_zones" "availability_zones" { + state = "available" +} diff --git a/ci/infra/aws/master-instance.tf b/ci/infra/aws/master-instance.tf index e81ec0ee3c..2670e3572d 100644 --- a/ci/infra/aws/master-instance.tf +++ b/ci/infra/aws/master-instance.tf @@ -1,27 +1,58 @@ -resource "aws_instance" "control_plane" { - ami = data.susepubliccloud_image_ids.sles15sp2_chost_byos.ids[0] +resource "aws_security_group" "master" { + description = "security rules for master nodes" + name = "${var.stack_name}-master" + vpc_id = aws_vpc.platform.id + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-master" + "Class" = "SecurityGroup" + }, + ) + + # etcd - internal + ingress { + from_port = 2379 + to_port = 2380 + protocol = "tcp" + cidr_blocks = [var.cidr_block] + description = "etcd" + } + + # api-server - everywhere + ingress { + from_port = 6443 + to_port = 6443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + description = "kubernetes api-server" + } +} + +# 
https://www.terraform.io/docs/providers/aws/r/instance.html +resource "aws_instance" "master" { + + count = var.masters + ami = data.susepubliccloud_image_ids.sles15sp2_chost_byos.ids[0] + instance_type = var.master_instance_type + key_name = aws_key_pair.kube.key_name + source_dest_check = false + + availability_zone = var.aws_availability_zones[count.index % length(var.aws_availability_zones)] + # associate_public_ip_address = false + # subnet_id = aws_subnet.private[count.index % length(var.aws_availability_zones)].id associate_public_ip_address = true - count = var.masters - instance_type = var.master_size - key_name = aws_key_pair.kube.key_name - source_dest_check = false - subnet_id = aws_subnet.public.id + subnet_id = aws_subnet.public[count.index % length(var.aws_availability_zones)].id user_data = data.template_cloudinit_config.cfg.rendered - iam_instance_profile = length(var.iam_profile_master) == 0 ? local.aws_iam_instance_profile_master_terraform : var.iam_profile_master + iam_instance_profile = length(var.iam_profile_master) == 0 ? aws_iam_instance_profile.master.0.name : var.iam_profile_master + # ebs_optimized = true depends_on = [ aws_internet_gateway.platform, aws_iam_instance_profile.master, ] - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-master-${count.index}" - "Class" = "Instance" - }, - ) - vpc_security_group_ids = [ aws_security_group.egress.id, aws_security_group.common.id, @@ -36,8 +67,15 @@ resource "aws_instance" "control_plane" { root_block_device { volume_type = "gp2" - volume_size = 20 + volume_size = var.master_volume_size delete_on_termination = true } -} + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-master-${count.index}" + "Class" = "Instance" + }, + ) +} diff --git a/ci/infra/aws/network-private.tf b/ci/infra/aws/network-private.tf new file mode 100644 index 0000000000..44b121c505 --- /dev/null +++ b/ci/infra/aws/network-private.tf @@ -0,0 +1,42 @@ +resource "aws_subnet" "private" { + count = length(var.aws_availability_zones) + vpc_id = aws_vpc.platform.id + availability_zone = element(var.aws_availability_zones, count.index) + cidr_block = cidrsubnet(var.cidr_block, 8, count.index + length(var.aws_availability_zones)) + map_public_ip_on_launch = false + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-subnet-private-${element(var.aws_availability_zones, count.index)}" + "Class" = "Subnet" + }, + ) +} + +resource "aws_route_table" "private" { + count = length(var.aws_availability_zones) + vpc_id = aws_vpc.platform.id + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-route-table-private-${element(var.aws_availability_zones, count.index)}" + "Class" = "RouteTable" + }, + ) +} + +resource "aws_route_table_association" "private" { + count = length(var.aws_availability_zones) + + route_table_id = element(aws_route_table.private.*.id, count.index) + subnet_id = element(aws_subnet.private.*.id, count.index) +} + +resource "aws_route" "private" { + count = length(var.aws_availability_zones) + route_table_id = element(aws_route_table.private.*.id, count.index) + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = element(aws_nat_gateway.public.*.id, count.index) +} diff --git a/ci/infra/aws/network-public.tf b/ci/infra/aws/network-public.tf new file mode 100644 index 0000000000..fa92deac86 --- /dev/null +++ b/ci/infra/aws/network-public.tf @@ -0,0 +1,70 @@ +resource "aws_subnet" "public" { + count = length(var.aws_availability_zones) + vpc_id = aws_vpc.platform.id + availability_zone = 
element(var.aws_availability_zones, count.index) + cidr_block = cidrsubnet(var.cidr_block, 8, count.index) + map_public_ip_on_launch = true + # depends_on = [aws_main_route_table_association.platform,] + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-subnet-public-${element(var.aws_availability_zones, count.index)}" + "Class" = "VPC" + }, + ) +} + +resource "aws_route_table" "public" { + vpc_id = aws_vpc.platform.id + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-route-table-public" + "Class" = "RouteTable" + }, + ) +} + +resource "aws_route_table_association" "public" { + count = length(var.aws_availability_zones) + + route_table_id = element(aws_route_table.public.*.id, count.index) + subnet_id = element(aws_subnet.public.*.id, count.index) +} + +resource "aws_route" "public" { + route_table_id = aws_route_table.public.id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.platform.id +} + +resource "aws_eip" "eip" { + count = length(var.aws_availability_zones) + vpc = true + # depends_on = [aws_internet_gateway.platform,] + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-eip-eip" + "Class" = "ElasticIP" + }, + ) +} + +resource "aws_nat_gateway" "public" { + count = length(var.aws_availability_zones) + subnet_id = element(aws_subnet.public.*.id, count.index) + allocation_id = element(aws_eip.eip.*.id, count.index) + # depends_on = [aws_eip.eip,] + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-nat_gateway-${element(var.aws_availability_zones, count.index)}" + "Class" = "NatGateway" + }, + ) +} diff --git a/ci/infra/aws/vpc_peering_connection.tf b/ci/infra/aws/network-vpc-peer.tf similarity index 96% rename from ci/infra/aws/vpc_peering_connection.tf rename to ci/infra/aws/network-vpc-peer.tf index ff90d58b5b..4f469614ae 100644 --- a/ci/infra/aws/vpc_peering_connection.tf +++ b/ci/infra/aws/network-vpc-peer.tf @@ -40,8 +40,7 @@ resource "aws_route" "public_subnet_to_peer" { resource "aws_route" "private_subnet_to_peer" { count = length(var.peer_vpc_ids) - route_table_id = aws_route_table.private.id + route_table_id = aws_route_table.private.*.id destination_cidr_block = element(data.aws_vpc.peer.*.cidr_block, count.index) vpc_peering_connection_id = element(aws_vpc_peering_connection.tunnel.*.id, count.index) } - diff --git a/ci/infra/aws/network-vpc.tf b/ci/infra/aws/network-vpc.tf new file mode 100644 index 0000000000..c98b91ff6c --- /dev/null +++ b/ci/infra/aws/network-vpc.tf @@ -0,0 +1,48 @@ +# https://www.terraform.io/docs/providers/aws/r/vpc.html +resource "aws_vpc" "platform" { + cidr_block = var.cidr_block + enable_dns_hostnames = true + enable_dns_support = true + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-vpc" + "Class" = "VPC" + }, + ) +} + +resource "aws_internet_gateway" "platform" { + vpc_id = aws_vpc.platform.id + depends_on = [aws_vpc.platform,] + + tags = merge( + local.tags, + { + "Class" = "Gateway" + }, + ) +} + +resource "aws_main_route_table_association" "main" { + route_table_id = aws_route_table.public.id + vpc_id = aws_vpc.platform.id +} + +resource "aws_vpc_dhcp_options" "platform" { + domain_name = "${var.aws_region}.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + + tags = merge( + local.tags, + { + "Class" = "VPCDHCP" + }, + ) +} + +resource "aws_vpc_dhcp_options_association" "platform" { + vpc_id = aws_vpc.platform.id + dhcp_options_id = aws_vpc_dhcp_options.platform.id +} diff --git a/ci/infra/aws/network.tf 
b/ci/infra/aws/network.tf deleted file mode 100644 index 45122eb3e0..0000000000 --- a/ci/infra/aws/network.tf +++ /dev/null @@ -1,160 +0,0 @@ -resource "aws_vpc" "platform" { - cidr_block = var.vpc_cidr_block - enable_dns_hostnames = true - enable_dns_support = true - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-vpc" - "Class" = "VPC" - }, - ) -} - -# list of az which can be access from the current region -data "aws_availability_zones" "az" { - state = "available" - - filter { - name = var.availability_zones_filter.name - values = var.availability_zones_filter.values - } -} - -resource "aws_vpc_dhcp_options" "platform" { - domain_name = "${data.aws_region.current.name}.compute.internal" - domain_name_servers = ["AmazonProvidedDNS"] - tags = merge( - local.tags, - { - "Class" = "VPCDHCP" - }, - ) -} - -resource "aws_vpc_dhcp_options_association" "dns_resolver" { - dhcp_options_id = aws_vpc_dhcp_options.platform.id - vpc_id = aws_vpc.platform.id -} - -resource "aws_internet_gateway" "platform" { - tags = merge( - local.tags, - { - "Class" = "Gateway" - }, - ) - vpc_id = aws_vpc.platform.id - depends_on = [aws_vpc.platform] -} - -resource "aws_subnet" "public" { - availability_zone = element(data.aws_availability_zones.az.names, 0) - cidr_block = var.public_subnet - depends_on = [aws_main_route_table_association.main] - map_public_ip_on_launch = true - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-subnet-public-${element(data.aws_availability_zones.az.names, 0)}" - "Class" = "VPC" - }, - ) - - vpc_id = aws_vpc.platform.id -} - -resource "aws_subnet" "private" { - availability_zone = element(data.aws_availability_zones.az.names, 0) - cidr_block = var.private_subnet - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-subnet-private-${element(data.aws_availability_zones.az.names, 0)}" - "Class" = "Subnet" - }, - ) - - vpc_id = aws_vpc.platform.id -} - -resource "aws_route_table" "public" { - vpc_id = aws_vpc.platform.id - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-route-table-public" - "Class" = "RouteTable" - }, - ) -} - -resource "aws_route" "public_to_everywhere" { - route_table_id = aws_route_table.public.id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.platform.id -} - -resource "aws_route_table" "private" { - vpc_id = aws_vpc.platform.id - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-route-table-private" - "Class" = "RouteTable" - }, - ) -} - -resource "aws_route" "private_nat_gateway" { - route_table_id = aws_route_table.private.id - destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.nat_gw.id -} - -resource "aws_main_route_table_association" "main" { - route_table_id = aws_route_table.public.id - vpc_id = aws_vpc.platform.id -} - -resource "aws_route_table_association" "private" { - route_table_id = aws_route_table.private.id - subnet_id = aws_subnet.private.id -} - -resource "aws_route_table_association" "public" { - route_table_id = aws_route_table.public.id - subnet_id = aws_subnet.public.id -} - -resource "aws_eip" "nat_eip" { - vpc = true - depends_on = [aws_internet_gateway.platform] - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-eip-nat_eip" - "Class" = "ElasticIP" - }, - ) -} - -resource "aws_nat_gateway" "nat_gw" { - allocation_id = aws_eip.nat_eip.id - subnet_id = aws_subnet.public.id - depends_on = [aws_eip.nat_eip] - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-nat_gateway" - "Class" = "NatGateway" - 
}, - ) -} - diff --git a/ci/infra/aws/output.tf b/ci/infra/aws/output.tf index e5802dc061..2a110cf2e8 100644 --- a/ci/infra/aws/output.tf +++ b/ci/infra/aws/output.tf @@ -1,11 +1,55 @@ -output "control_plane_public_ip" { - value = "${zipmap(aws_instance.control_plane.*.id, aws_instance.control_plane.*.public_ip)}" +output "username" { + value = var.username } -output "control_plane_private_dns" { - value = "${zipmap(aws_instance.control_plane.*.id, aws_instance.control_plane.*.private_dns)}" +output "elb_address" { + value = aws_elb.elb.dns_name } -output "nodes_private_dns" { - value = "${zipmap(aws_instance.nodes.*.id, aws_instance.nodes.*.private_dns)}" +output "vpc_id" { + value = aws_vpc.platform.id +} + +output "public_subnets" { + value = aws_subnet.public.*.id +} + +output "public_cidrs" { + value = aws_subnet.public.*.cidr_block +} + +output "private_subnets" { + value = aws_subnet.private.*.id +} + +output "private_cidrs" { + value = aws_subnet.private.*.cidr_block +} + +output "masters_public_ip" { + value = zipmap( + aws_instance.master.*.id, + aws_instance.master.*.public_ip, + ) +} + +output "masters_private_dns" { + value = zipmap( + aws_instance.master.*.id, + aws_instance.master.*.private_dns, + ) +} + +output "workers_public_ip" { + value = zipmap( + aws_instance.worker.*.id, + aws_instance.worker.*.public_ip, + ) +} + +output "workers_private_dns" { + value = zipmap( + aws_instance.worker.*.id, + aws_instance.worker.*.private_dns + ) } diff --git a/ci/infra/aws/registration.auto.tfvars b/ci/infra/aws/registration.auto.tfvars new file mode 100644 index 0000000000..6e7c2cb666 --- /dev/null +++ b/ci/infra/aws/registration.auto.tfvars @@ -0,0 +1,12 @@ +## To register CaaSP product please use one of the following method +# - register against SUSE Customer Service, with SUSE CaaSP Product Registration Code +# - register against local SUSE Repository Mirroring Server + +# SUSE CaaSP Product Registration Code +#caasp_registry_code = "" + +# SUSE Repository Mirroring Server Name (FQDN) - https://scc.suse.com +#rmt_server_name = "rmt.example.com" + +# SUSE Repository Mirroring Server Name (FQDN) +#suma_server_name = "suma.example.com" diff --git a/ci/infra/aws/aws.tf b/ci/infra/aws/resource-group.tf similarity index 51% rename from ci/infra/aws/aws.tf rename to ci/infra/aws/resource-group.tf index 5bdbd89c95..b449b1ab17 100644 --- a/ci/infra/aws/aws.tf +++ b/ci/infra/aws/resource-group.tf @@ -1,50 +1,6 @@ -locals { - # Do not add the special `kubernetes.io/cluster` here, - # this tag cannot be added to all our resources otherwise the CPI - # will get confused when dealing with security rules objects. - basic_tags = merge( - { - "Name" = var.stack_name - "Environment" = var.stack_name - }, - var.tags, - ) - - tags = merge( - local.basic_tags, - { - format("kubernetes.io/cluster/%v", var.stack_name) = "SUSE-terraform" - }, - ) -} - -provider "aws" { - profile = "default" -} - -resource "aws_key_pair" "kube" { - key_name = "${var.stack_name}-keypair" - public_key = element(var.authorized_keys, 0) - - tags = merge( - local.basic_tags, - { - "Name" = "${var.stack_name}-keypair" - "Class" = "KeyPair" - }, - ) -} - resource "aws_resourcegroups_group" "kube" { - name = "${var.stack_name}-resourcegroup" - - tags = merge( - local.basic_tags, - { - "Name" = "${var.stack_name}-resourcegroup" - "Class" = "ResourceGroup" - }, - ) + count = var.enable_resource_group ? 
1 : 0 + name = "${var.stack_name}-resourcegroup" resource_query { query = jsonencode({ @@ -71,4 +27,12 @@ resource "aws_resourcegroups_group" "kube" { ] }) } -} + + tags = merge( + local.basic_tags, + { + "Name" = "${var.stack_name}-resourcegroup" + "Class" = "ResourceGroup" + }, + ) +} \ No newline at end of file diff --git a/ci/infra/aws/security-groups-egress.tf b/ci/infra/aws/security-groups-egress.tf deleted file mode 100644 index ab3c824520..0000000000 --- a/ci/infra/aws/security-groups-egress.tf +++ /dev/null @@ -1,21 +0,0 @@ -resource "aws_security_group" "egress" { - description = "egress traffic" - name = "${var.stack_name}-egress" - vpc_id = aws_vpc.platform.id - - tags = merge( - local.basic_tags, - { - "Name" = "${var.stack_name}-egress" - "Class" = "SecurityGroup" - }, - ) - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - diff --git a/ci/infra/aws/security-groups-load-balancer.tf b/ci/infra/aws/security-groups-load-balancer.tf deleted file mode 100644 index 275a96bef7..0000000000 --- a/ci/infra/aws/security-groups-load-balancer.tf +++ /dev/null @@ -1,48 +0,0 @@ -# A security group for the ELB so it is accessible via the web -resource "aws_security_group" "elb" { - name = "${var.stack_name}-elb" - description = "give access to kube api server" - vpc_id = aws_vpc.platform.id - - tags = merge( - local.basic_tags, - { - "Name" = "${var.stack_name}-elb" - "Class" = "SecurityGroup" - }, - ) - - # HTTP access from anywhere - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - # HTTPS access from anywhere - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 6443 - to_port = 6443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - description = "kubernetes API server" - } - - # Allow access to dex (32000) and gangway (32001) - ingress { - from_port = 32000 - to_port = 32001 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - description = "dex and gangway" - } -} - diff --git a/ci/infra/aws/security-groups-master.tf b/ci/infra/aws/security-groups-master.tf deleted file mode 100644 index f26ceafe26..0000000000 --- a/ci/infra/aws/security-groups-master.tf +++ /dev/null @@ -1,32 +0,0 @@ -resource "aws_security_group" "master" { - description = "security rules for master nodes" - name = "${var.stack_name}-master" - vpc_id = aws_vpc.platform.id - - tags = merge( - local.basic_tags, - { - "Name" = "${var.stack_name}-master" - "Class" = "SecurityGroup" - }, - ) - - # etcd - internal - ingress { - from_port = 2379 - to_port = 2380 - protocol = "tcp" - cidr_blocks = [var.vpc_cidr_block] - description = "etcd" - } - - # api-server - everywhere - ingress { - from_port = 6443 - to_port = 6443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - description = "kubernetes api-server" - } -} - diff --git a/ci/infra/aws/security-groups-worker.tf b/ci/infra/aws/security-groups-worker.tf deleted file mode 100644 index 9167e412a5..0000000000 --- a/ci/infra/aws/security-groups-worker.tf +++ /dev/null @@ -1,23 +0,0 @@ -# This security group is deliberately left empty, -# it's applied only to worker nodes. -# -# This security group is the only one with the -# `kubernetes.io/cluster/` tag, that makes it discoverable by the -# AWS CPI controller. 
-# As a result of that, this is going to be the security group the CPI will -# alter to add the rules needed to access the worker nodes from the AWS -# resources dynamically provisioned by the CPI (eg: load balancers). -resource "aws_security_group" "worker" { - description = "security group rules for worker node" - name = "${var.stack_name}-worker" - vpc_id = aws_vpc.platform.id - - tags = merge( - local.tags, - { - "Name" = "${var.stack_name}-worker" - "Class" = "SecurityGroup" - }, - ) -} - diff --git a/ci/infra/aws/security-groups-common.tf b/ci/infra/aws/security-groups.tf similarity index 56% rename from ci/infra/aws/security-groups-common.tf rename to ci/infra/aws/security-groups.tf index 8a39716ef4..7a168f676d 100644 --- a/ci/infra/aws/security-groups-common.tf +++ b/ci/infra/aws/security-groups.tf @@ -4,7 +4,7 @@ resource "aws_security_group" "common" { vpc_id = aws_vpc.platform.id tags = merge( - local.basic_tags, + local.tags, { "Name" = "${var.stack_name}-common" "Class" = "SecurityGroup" @@ -26,7 +26,7 @@ resource "aws_security_group" "common" { to_port = -1 protocol = "icmp" security_groups = [] - cidr_blocks = [var.vpc_cidr_block] + cidr_blocks = [var.cidr_block] description = "allow ICPM traffic egress" } @@ -44,7 +44,7 @@ resource "aws_security_group" "common" { from_port = 4240 to_port = 4240 protocol = "tcp" - cidr_blocks = [var.vpc_cidr_block] + cidr_blocks = [var.cidr_block] description = "cilium - health check - internal" } @@ -53,7 +53,7 @@ resource "aws_security_group" "common" { from_port = 8472 to_port = 8472 protocol = "udp" - cidr_blocks = [var.vpc_cidr_block] + cidr_blocks = [var.cidr_block] description = "cilium - VXLAN traffic - internal" } @@ -62,7 +62,7 @@ resource "aws_security_group" "common" { from_port = 10250 to_port = 10250 protocol = "tcp" - cidr_blocks = [var.vpc_cidr_block] + cidr_blocks = [var.cidr_block] description = "master to worker kubelet communication - internal" } @@ -71,7 +71,7 @@ resource "aws_security_group" "common" { from_port = 10256 to_port = 10256 protocol = "tcp" - cidr_blocks = [var.vpc_cidr_block] + cidr_blocks = [var.cidr_block] description = "kubeproxy health check - internal only" } @@ -96,3 +96,55 @@ resource "aws_security_group" "common" { } } +resource "aws_security_group" "egress" { + description = "egress traffic" + name = "${var.stack_name}-egress" + vpc_id = aws_vpc.platform.id + + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-egress" + "Class" = "SecurityGroup" + }, + ) + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# resource "aws_security_group" "default" { +# count = local.security_group_count +# name = module.label.id +# vpc_id = var.vpc_id +# description = "Instance default security group (only egress access is allowed)" +# tags = module.label.tags + +# lifecycle { +# create_before_destroy = true +# } +# } + +# resource "aws_security_group_rule" "egress" { +# count = var.create_default_security_group ? 1 : 0 +# type = "egress" +# from_port = 0 +# to_port = 65535 +# protocol = "-1" +# cidr_blocks = ["0.0.0.0/0"] +# security_group_id = aws_security_group.default[0].id +# } + +# resource "aws_security_group_rule" "ingress" { +# count = var.create_default_security_group ? 
length(compact(var.allowed_ports)) : 0 +# type = "ingress" +# from_port = var.allowed_ports[count.index] +# to_port = var.allowed_ports[count.index] +# protocol = "tcp" +# cidr_blocks = ["0.0.0.0/0"] +# security_group_id = aws_security_group.default[0].id +# } diff --git a/ci/infra/aws/terraform.tfvars.example b/ci/infra/aws/terraform.tfvars.example index 468ff3779f..9168d366d5 100644 --- a/ci/infra/aws/terraform.tfvars.example +++ b/ci/infra/aws/terraform.tfvars.example @@ -1,34 +1,50 @@ -# prefix for resources -stack_name = "my-k8s" +# AWS region +# A list of region names can be found here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions +# https://aws.amazon.com/about-aws/global-infrastructure/ +# aws ec2 describe-regions --all-regions +# aws_region = "eu-central-1" +aws_region = "ap-northeast-2" -# Number of master nodes -masters = 1 +# AWS availability zone +# You can get AZ name with the following command: +# aws ec2 describe-availability-zones --region region-name +# +# More information here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#using-regions-availability-zones-describe +# aws_availability_zones = [ "eu-central-1a", "eu-central-1b", "eu-central-1c" ] +# aws_availability_zones = [ "us-west-1a", "us-west-1b", "us-west-1c" ] +# aws_availability_zones = [ "ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c" ] +aws_availability_zones = [ "ap-northeast-2a", "ap-northeast-2c" ] -# Number of worker nodes -workers = 2 +# Enable Resource Group +#enable_resource_group = true -# Extra tags to add to all the resources -#tags = { -# "key": "value" -#} +# CIDR blocks for vpc +cidr_block = "10.1.0.0/16" -# a SSH public key for accessing the machines -authorized_keys = [ - "ssh-rsa AAAAB3NzaC1yc2EA...", -] +# Identifier to make all your resources unique and avoid clashes with other users of this terraform project +# stack_name = "my-sles-cluster" +stack_name = "sles-101" -## To register CaaSP product please use ONLY ONE of the following method -# - register against SUSE Customer Service, with SUSE CaaSP Product Registration Code -# - register against local SUSE Repository Mirroring Server -# -# SUSE CaaSP Product Registration Code: -#caasp_registry_code = "" -# -# SUSE Repository Mirroring Server Name (FQDN): -#rmt_server_name = "rmt.example.com" +# Extra tags to add to all the resources +tags = { + "stack_name": "sles-101" + # "key": "value" +} # List of VPC IDs to join via VPC peer link -#peer_vpc_ids = ["vpc-id1", "vpc-id2"] +# peer_vpc_ids = ["vpc-id1", "vpc-id2"] + +# Number of master nodes +# masters = 1 +masters = 1 + +# Size of the master nodes +# EXAMPLE: +# master_instance_type = "t2.medium" +master_instance_type = "t2.medium" + +# "Size of the EBS volume, in GB" +master_volume_size = 20 # Name of the IAM profile to associate to control plane nodes. # Leave empty to have terraform create one. @@ -36,6 +52,19 @@ authorized_keys = [ # # Note well: you must have the right set of permissions. # iam_profile_master = "caasp-k8s-master-vm-profile" +# iam_profile_master = "sles-101_cpi_master" + +# Number of worker nodes +# workers = 3 +workers = 2 + +# Size of the worker nodes +# EXAMPLE: +# worker_instance_type = "t2.medium" +worker_instance_type = "t2.medium" + +# "Size of the EBS volume, in GB" +worker_volume_size = 20 # Name of the IAM profile to associate to worker nodes # Leave empty to have terraform create one. 
@@ -43,9 +72,104 @@ authorized_keys = [ # # Note well: you must have the right set of permissions. # iam_profile_worker = "caasp-k8s-worker-vm-profile" +# iam_profile_worker = "sles-101_cpi_worker" + +# # Username for the cluster nodes +# # EXAMPLE: +# # username = "sles" +# username = "sles" + +# # Password for the cluster nodes +# # EXAMPLE: +# # password = "linux" +# password = "linux" + +# Optional: Define the repositories to use +# EXAMPLE: +# repositories = { +# repository1 = "http://repo.example.com/repository1/" +# repository2 = "http://repo.example.com/repository2/" +# } +repositories = { + devel_languages_go = "http://download.opensuse.org/repositories/devel:/languages:/go/openSUSE_Leap_15.2/" + devel_languages_python = "http://download.opensuse.org/repositories/devel:/languages:/python/openSUSE_Leap_15.2/" + devel_languages_rust = "http://download.opensuse.org/repositories/devel:/languages:/rust/openSUSE_Leap_15.2/" +} +# lb_repositories = {} + +# repositories = { +# sle15sp2_pool = "http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/GA/standard/" +# sle15sp2_update = "http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/Update/standard/" +# sle15_pool = "http://download.suse.de/ibs/SUSE:/SLE-15:/GA/standard/" +# sle15_update = "http://download.suse.de/ibs/SUSE:/SLE-15:/Update/standard/" +# caasp = "http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/Update:/Products:/CAASP:/4.5/standard" +# sle15_debuginfo_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Basesystem/15/x86_64/product_debug/" +# sle15sp2_debuginfo_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Basesystem/15-SP2/x86_64/product_debug/" +# } + +# repositories = { +# caasp_devel = "http://download.suse.de/ibs/Devel:/CaaSP:/4.5/SLE_15_SP2/" +# suse_ca = "http://download.suse.de/ibs/SUSE:/CA/SLE_15_SP2/" +# sle_server_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Product-SLES/15-SP2/x86_64/product/" +# basesystem_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Basesystem/15-SP2/x86_64/product/" +# containers_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Containers/15-SP2/x86_64/product/" +# serverapps_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Server-Applications/15-SP2/x86_64/product/" +# sle_server_updates = "http://download.suse.de/ibs/SUSE/Updates/SLE-Product-SLES/15-SP2/x86_64/update/" +# basesystem_updates = "http://download.suse.de/ibs/SUSE/Updates/SLE-Module-Basesystem/15-SP2/x86_64/update/" +# containers_updates = "http://download.suse.de/ibs/SUSE/Updates/SLE-Module-Containers/15-SP2/x86_64/update/" +# serverapps_updates = "http://download.suse.de/ibs/SUSE/Updates/SLE-Module-Server-Applications/15-SP2/x86_64/update/" +# sle15sp2_pool = "http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/GA/standard/" +# sle15sp2_update = "http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/Update/standard/" +# sle15_debuginfo_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Basesystem/15/x86_64/product_debug/" +# sle15sp2_debuginfo_pool = "http://download.suse.de/ibs/SUSE/Products/SLE-Module-Basesystem/15-SP2/x86_64/product_debug/" +# } -# Use specific Availibility Zone -#availability_zones_filter= { -# name = "zone-name" -# values = ["eu-west-3c"] -#} +# Minimum required packages. Do not remove them. 
+# Feel free to add more packages +packages = [] +# packages = [ +# "zypper-needs-restarting", +# "kernel-default", +# "-kernel-default-base", +# "ca-certificates-suse", +# "gdb", +# "systemd-coredump", +# "lz4" +# ] + +# ssh keys to inject into all the nodes +# EXAMPLE: +# authorized_keys = [ +# "ssh-rsa " +# ] +authorized_keys = [ +] + +# ssh key to launch the nodes with +# EXAMPLE: +# key_pair = "my_keypair" +# key_pair = "" + +# IMPORTANT: Replace these ntp servers with ones from your infrastructure +ntp_servers = [ + "TIME.google.com", + "TIME1.google.com", + "TIME2.google.com", + "TIME3.google.com", + "TIME4.google.com" +] + +# DNS servers for the nodes +# dns_nameservers = [ +# "172.28.0.2", +# "8.8.8.8", +# "8.8.4.4", +# "1.1.1.1", +# "1.0.0.1", +# "168.95.1.1", +# "168.95.192.1" +# ] +dns_nameservers = [ + "8.8.8.8", + "8.8.4.4" +] diff --git a/ci/infra/aws/variables.tf b/ci/infra/aws/variables.tf index e70380f10f..e8199d17b7 100644 --- a/ci/infra/aws/variables.tf +++ b/ci/infra/aws/variables.tf @@ -1,67 +1,67 @@ -variable "stack_name" { - default = "k8s" - description = "identifier to make all your resources unique and avoid clashes with other users of this terraform project" -} - -variable "ami_name_pattern" { - default = "suse-sles-15-*" - description = "Pattern for choosing the AMI image" +variable "aws_region" { + type = string + # default = "eu-north-1" + description = "Name of the AWS region to be used" } -variable "authorized_keys" { +variable "aws_availability_zones" { type = list(string) - default = [] - description = "ssh keys to inject into all the nodes. First key will be used for creating a keypair." + # default = ["us-west-2a", "us-west-2b", "us-west-2c"] + description = "List of Availability Zones (e.g. `['us-east-1a', 'us-east-1b', 'us-east-1c']`)" } -variable "public_subnet" { - type = string - description = "CIDR blocks for each public subnet of vpc" - default = "10.1.1.0/24" +variable "enable_resource_group" { + type = bool + default = false + description = "Use this to enable resource group" } -variable "private_subnet" { +variable "cidr_block" { type = string - description = "Private subnet of vpc" - default = "10.1.4.0/24" + default = "10.1.0.0/16" + description = "CIDR blocks for vpc" } -variable "vpc_cidr_block" { +variable "stack_name" { type = string - description = "CIRD blocks for vpc" - default = "10.1.0.0/16" + default = "k8s" + description = "Identifier to make all your resources unique and avoid clashes with other users of this terraform project" } -variable "master_size" { - default = "t2.large" - description = "Size of the master nodes" +variable "tags" { + type = map(string) + default = {} + description = "Extra tags used for the AWS resources created" } -variable "masters" { - default = 1 - description = "Number of master nodes" +variable "authorized_keys" { + type = list(string) + default = [] + description = "SSH keys to inject into all the nodes" } -variable "worker_size" { - default = "t2.medium" - description = "Size of the worker nodes" +variable "key_pair" { + type = string + default = "" + description = "SSH key stored in openstack to create the nodes with" } -variable "workers" { - default = 1 - description = "Number of worker nodes" +variable "ntp_servers" { + type = list(string) + default = [] + description = "List of NTP servers to configure" } -variable "tags" { - type = map(string) - default = {} - description = "Extra tags used for the AWS resources created" +variable "dns_nameservers" { + type = list(string) + default = [] + 
description = "List of Name servers to configure" } variable "repositories" { - type = list(string) - default = [] - description = "List of extra repositories (as maps with ''='') to add via cloud-init" + type = map(string) + default = {} + description = "Maps of repositories with ''='' to add via cloud-init" } variable "packages" { @@ -77,48 +77,89 @@ variable "packages" { "-docker-img-store-setup", ] - description = "list of additional packages to install" + description = "List of packages to install" +} + +variable "username" { + type = string + default = "sles" + description = "Username for the cluster nodes" +} + +variable "password" { + type = string + default = "linux" + description = "Password for the cluster nodes" } variable "caasp_registry_code" { + type = string default = "" description = "SUSE CaaSP Product Registration Code" } variable "rmt_server_name" { + type = string default = "" description = "SUSE Repository Mirroring Server Name" } variable "suma_server_name" { + type = string default = "" description = "SUSE Manager Server Name" } +variable "peer_vpc_ids" { + type = list(string) + default = [] + description = "IDs of a VPCs to connect to via a peering connection" +} + +variable "masters" { + type = number + default = 1 + description = "Number of master nodes" +} + +variable "master_instance_type" { + type = string + default = "t2.medium" + description = "Instance type of the master nodes" +} + +variable "master_volume_size" { + type = number + default = 20 + description = "Size of the EBS volume, in GB" +} + variable "iam_profile_master" { + type = string default = "" description = "IAM profile associated with the master nodes" } -variable "iam_profile_worker" { - default = "" - description = "IAM profile associated with the worker nodes" +variable "workers" { + type = number + default = 2 + description = "Number of worker nodes" } -variable "peer_vpc_ids" { - type = list(string) - default = [] - description = "IDs of a VPCs to connect to via a peering connection" +variable "worker_instance_type" { + type = string + default = "t2.medium" + description = "Instance type of the worker nodes" +} + +variable "worker_volume_size" { + type = number + default = 20 + description = "Size of the EBS volume, in GB" } -variable "availability_zones_filter" { - type = object({ - name = string - values = list(string) - }) - default = { - name = "zone-name" - values = ["*"] - } - description = "Filter Availability Zones" +variable "iam_profile_worker" { + type = string + default = "" + description = "IAM profile associated with the worker nodes" } diff --git a/ci/infra/aws/versions.tf b/ci/infra/aws/versions.tf index ac97c6ac8e..0c6ab35d39 100644 --- a/ci/infra/aws/versions.tf +++ b/ci/infra/aws/versions.tf @@ -1,4 +1,10 @@ - terraform { required_version = ">= 0.12" + + # required_providers { + # aws = "~> 2.0" + # template = "~> 2.0" + # local = "~> 1.2" + # null = "~> 2.0" + # } } diff --git a/ci/infra/aws/worker-instance.tf b/ci/infra/aws/worker-instance.tf index a9e0f1c32e..a2308e7626 100644 --- a/ci/infra/aws/worker-instance.tf +++ b/ci/infra/aws/worker-instance.tf @@ -1,26 +1,49 @@ -resource "aws_instance" "nodes" { - ami = data.susepubliccloud_image_ids.sles15sp2_chost_byos.ids[0] - associate_public_ip_address = false - count = var.workers - instance_type = var.worker_size - key_name = aws_key_pair.kube.key_name - source_dest_check = false - user_data = data.template_cloudinit_config.cfg.rendered - iam_instance_profile = length(var.iam_profile_worker) == 0 ? 
local.aws_iam_instance_profile_worker_terraform : var.iam_profile_worker - subnet_id = aws_subnet.private.id - - depends_on = [ - aws_route.private_nat_gateway, - aws_iam_instance_profile.worker, - ] +# This security group is deliberately left empty, +# it's applied only to worker nodes. +# +# This security group is the only one with the +# `kubernetes.io/cluster/` tag, that makes it discoverable by the +# AWS CPI controller. +# As a result of that, this is going to be the security group the CPI will +# alter to add the rules needed to access the worker nodes from the AWS +# resources dynamically provisioned by the CPI (eg: load balancers). +resource "aws_security_group" "worker" { + description = "security group rules for worker node" + name = "${var.stack_name}-worker" + vpc_id = aws_vpc.platform.id tags = merge( local.tags, { - "Name" = "${var.stack_name}-node-${count.index}" - "Class" = "Instance" + "Name" = "${var.stack_name}-worker" + "Class" = "SecurityGroup" }, ) +} + +# https://www.terraform.io/docs/providers/aws/r/instance.html +resource "aws_instance" "worker" { + + count = var.workers + ami = data.susepubliccloud_image_ids.sles15sp2_chost_byos.ids[0] + instance_type = var.worker_instance_type + key_name = aws_key_pair.kube.key_name + source_dest_check = false + + availability_zone = var.aws_availability_zones[count.index % length(var.aws_availability_zones)] + # associate_public_ip_address = false + # subnet_id = aws_subnet.private[count.index % length(var.aws_availability_zones)].id + associate_public_ip_address = true + subnet_id = aws_subnet.public[count.index % length(var.aws_availability_zones)].id + user_data = data.template_cloudinit_config.cfg.rendered + iam_instance_profile = length(var.iam_profile_worker) == 0 ? aws_iam_instance_profile.worker.0.name : var.iam_profile_worker + # ebs_optimized = true + + depends_on = [ + # aws_route.private_nat_gateway, + aws_internet_gateway.platform, + aws_iam_instance_profile.worker, + ] vpc_security_group_ids = [ aws_security_group.egress.id, @@ -36,8 +59,15 @@ resource "aws_instance" "nodes" { root_block_device { volume_type = "gp2" - volume_size = 20 + volume_size = var.worker_volume_size delete_on_termination = true } -} + tags = merge( + local.tags, + { + "Name" = "${var.stack_name}-worker-${count.index}" + "Class" = "Instance" + }, + ) +}
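
A minimal usage sketch for the refreshed ci/infra/aws configuration, assuming the commands are run from a checkout of ci/infra/aws with Terraform >= 0.12 and valid AWS credentials; the file names, variables, and output names below are taken from the files in this change, while the plan file name and the edits you make to the tfvars files are illustrative only.

# Export AWS credentials (see container-openrc.sh added above; fill in the two variables first)
source ./container-openrc.sh

# Start from the shipped examples and adjust aws_region, aws_availability_zones,
# instance types, authorized_keys and (optionally) the registration settings
cp terraform.tfvars.example terraform.tfvars
${EDITOR:-vi} terraform.tfvars registration.auto.tfvars

# Standard Terraform workflow
terraform init
terraform plan -out caasp.plan    # "caasp.plan" is an arbitrary plan file name
terraform apply caasp.plan

# Inspect the outputs defined in output.tf, e.g. node addresses and the load balancer
terraform output masters_public_ip
terraform output workers_public_ip
terraform output elb_address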