Commit b1fbead: Update to TF v0.12.12 (kubernetes-sigs#5267)

Miouge1 authored and k8s-ci-robot committed Dec 4, 2019 (1 parent: b06826e)
Showing 8 changed files with 69 additions and 71 deletions.
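Every TF_VERSION pin below moves to 0.12.12, and the Terraform configs pick up 0.12 language syntax. A quick sanity check that a local toolchain matches the new pin (a minimal sketch):

```bash
# Should report Terraform v0.12.12 to match the CI pins below.
terraform version
```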
18 changes: 8 additions & 10 deletions .gitlab-ci/terraform.yml
@@ -9,10 +9,9 @@
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
- ln -s contrib/terraform/$PROVIDER/hosts
- terraform init contrib/terraform/$PROVIDER
- cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
@@ -24,8 +23,7 @@
stage: unit-tests
only: ['master', /^pr-.*$/]
script:
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
- terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
- terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
@@ -48,7 +46,7 @@
tf-validate-openstack:
extends: .terraform_validate
variables:
- TF_VERSION: 0.12.6
+ TF_VERSION: 0.12.12
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME

@@ -62,14 +60,14 @@ tf-validate-packet:
tf-validate-aws:
extends: .terraform_validate
variables:
- TF_VERSION: 0.11.11
+ TF_VERSION: 0.12.12
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
# extends: .terraform_apply
# variables:
- # TF_VERSION: 0.11.11
+ # TF_VERSION: 0.12.12
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
@@ -83,7 +81,7 @@
# tf-packet-ubuntu18-default:
# extends: .terraform_apply
# variables:
- # TF_VERSION: 0.11.11
+ # TF_VERSION: 0.12.12
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
@@ -110,7 +108,7 @@ tf-ovh_ubuntu18-calico:
when: on_success
variables:
<<: *ovh_variables
- TF_VERSION: 0.12.6
+ TF_VERSION: 0.12.12
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
@@ -138,7 +136,7 @@ tf-ovh_coreos-calico:
when: on_success
variables:
<<: *ovh_variables
- TF_VERSION: 0.12.6
+ TF_VERSION: 0.12.12
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
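With the provider-specific variable-file switch gone, every provider now validates against cluster.tfvars. The CI steps above can be reproduced locally; a sketch, assuming a kubespray checkout and one provider (aws here) as an example:

```bash
# Mirror the .terraform_validate job for a single provider.
PROVIDER=aws
cp "contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars" .
terraform init "contrib/terraform/$PROVIDER"
terraform validate -var-file=cluster.tfvars "contrib/terraform/$PROVIDER"
terraform fmt -check -diff "contrib/terraform/$PROVIDER"
```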
84 changes: 42 additions & 42 deletions contrib/terraform/aws/create-infrastructure.tf
@@ -16,30 +16,30 @@ data "aws_availability_zones" "available" {}
*/

module "aws-vpc" {
source = "modules/vpc"
source = "./modules/vpc"

aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
- aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+ aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
default_tags = "${var.default_tags}"
}

module "aws-elb" {
source = "modules/elb"
source = "./modules/elb"

aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
- aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+ aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}"
default_tags = "${var.default_tags}"
}

module "aws-iam" {
source = "modules/iam"
source = "./modules/iam"

aws_cluster_name = "${var.aws_cluster_name}"
}
@@ -54,18 +54,18 @@ resource "aws_instance" "bastion-server" {
instance_type = "${var.aws_bastion_size}"
count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true
- availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
- subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
+ availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+ subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"

- vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+ vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

key_name = "${var.AWS_SSH_KEY_NAME}"

tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
"Cluster", "${var.aws_cluster_name}",
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
))}"
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
"Cluster", "${var.aws_cluster_name}",
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
))}"
}

/*
@@ -79,25 +79,25 @@ resource "aws_instance" "k8s-master" {

count = "${var.aws_kube_master_num}"

- availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
- subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+ availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+ subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

- vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+ vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"

tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "master"
))}"
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "master"
))}"
}

resource "aws_elb_attachment" "attach_master_nodes" {
count = "${var.aws_kube_master_num}"
elb = "${module.aws-elb.aws_elb_api_id}"
instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
}

resource "aws_instance" "k8s-etcd" {
@@ -106,18 +106,18 @@ resource "aws_instance" "k8s-etcd" {

count = "${var.aws_etcd_num}"

- availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
- subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+ availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+ subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

- vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+ vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

key_name = "${var.AWS_SSH_KEY_NAME}"

tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd"
))}"
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd"
))}"
}

resource "aws_instance" "k8s-worker" {
@@ -126,19 +126,19 @@ resource "aws_instance" "k8s-worker" {

count = "${var.aws_kube_worker_num}"

- availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
- subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+ availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+ subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

- vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+ vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"

tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker"
))}"
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker"
))}"
}

/*
@@ -148,14 +148,14 @@ resource "aws_instance" "k8s-worker" {
data "template_file" "inventory" {
template = "${file("${path.module}/templates/inventory.tpl")}"

- vars {
- public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
- connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
+ vars = {
+ public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
+ connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
- connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
- list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
- list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
- list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
+ connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
+ list_master = "${join("\n", aws_instance.k8s-master.*.tags.Name)}"
+ list_node = "${join("\n", aws_instance.k8s-worker.*.tags.Name)}"
+ list_etcd = "${join("\n", aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
}
}
@@ -165,7 +165,7 @@ resource "null_resource" "inventories" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}

- triggers {
+ triggers = {
template = "${data.template_file.inventory.rendered}"
}
}
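The edits in this file are the standard Terraform 0.11-to-0.12 migration: local module sources gain a `./` prefix, expressions that already yield lists (such as `module.aws-vpc.aws_security_group`) lose their redundant `[ ]` wrapping, and `vars`/`triggers` become `= { }` map arguments. Terraform 0.12 ships an upgrade tool that performs most of these rewrites automatically; a hedged sketch of running it against this directory:

```bash
cd contrib/terraform/aws
terraform init          # the directory must be initialized before upgrading
terraform 0.12upgrade   # rewrites the .tf files in place; review with git diff
```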
2 changes: 1 addition & 1 deletion contrib/terraform/aws/modules/elb/main.tf
@@ -28,7 +28,7 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = ["${var.aws_subnet_ids_public}"]
subnets = "${var.aws_subnet_ids_public}"
security_groups = ["${aws_security_group.aws-elb.id}"]

listener {
20 changes: 10 additions & 10 deletions contrib/terraform/aws/terraform.tfvars
@@ -2,34 +2,34 @@
aws_cluster_name = "devtest"

#VPC Vars
  aws_vpc_cidr_block = "10.250.192.0/18"
- aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
- aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
+ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
+ aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

#Bastion Host
aws_bastion_size = "t2.medium"


#Kubernetes Cluster

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"

aws_etcd_num = 3
aws_etcd_size = "t2.medium"

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"

#Settings AWS ELB

aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"

default_tags = {
# Env = "devtest"
# Product = "kubernetes"
# Env = "devtest"
# Product = "kubernetes"
}

inventory_file = "../../../inventory/hosts"
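The remaining churn in this file is alignment only, which is what `terraform fmt` produces; the same check the CI job runs can be applied locally:

```bash
# -check exits non-zero on unformatted files; -diff shows what would change.
terraform fmt -check -diff contrib/terraform/aws
```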
10 changes: 5 additions & 5 deletions contrib/terraform/packet/README.md
@@ -38,7 +38,7 @@ now six total etcd replicas.

## SSH Key Setup

- An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tf will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tf to blank to prevent the duplicate key from being uploaded which will cause an error.
+ An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error.

If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:

@@ -72,7 +72,7 @@ If someone gets this key, they can startup/shutdown hosts in your project!
For more information on how to generate an API key or find your project ID, please see:
https://support.packet.com/kb/articles/api-integrations

- The Packet Project ID associated with the key will be set later in cluster.tf.
+ The Packet Project ID associated with the key will be set later in cluster.tfvars.

For more information about the API, please see:
https://www.packet.com/developers/api/
@@ -88,7 +88,7 @@ Note that to deploy several clusters within the same project you need to use [te
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

- For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
+ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster.
@@ -138,7 +138,7 @@ This should finish fairly quickly telling you Terraform has successfully initialized
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
- $ terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
+ $ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
$ export ANSIBLE_HOST_KEY_CHECKING=False
$ ansible-playbook -i hosts ../../cluster.yml
```
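Before applying, the same var file also works for a dry run; a minimal sketch:

```bash
terraform plan -var-file=cluster.tfvars ../../contrib/terraform/packet
```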
@@ -147,7 +147,7 @@ $ ansible-playbook -i hosts ../../cluster.yml
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
- $ terraform destroy -var-file=cluster.tf ../../contrib/terraform/packet
+ $ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
6 changes: 3 additions & 3 deletions docs/packet.md
@@ -40,7 +40,7 @@ Grab the latest version of Terraform and install it.
```bash
echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_darwin_amd64.zip"
sudo yum install unzip
- sudo unzip terraform_0.11.11_linux_amd64.zip -d /usr/local/bin/
+ sudo unzip terraform_0.12.12_linux_amd64.zip -d /usr/local/bin/
```
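Note the snippet above echoes a darwin_amd64 URL, then unzips a linux_amd64 archive it never downloaded. A consistent Linux variant, pinned to the version this commit targets (a sketch, not the documented procedure):

```bash
TF_VERSION=0.12.12
# Download, then unpack the binary onto the PATH.
curl -LO "https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip"
sudo yum install -y unzip
sudo unzip "terraform_${TF_VERSION}_linux_amd64.zip" -d /usr/local/bin/
```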

## Download Kubespray
@@ -67,7 +67,7 @@ Details about the cluster, such as the name, as well as the authentication token
for Packet need to be defined. To find these values see [Packet API Integration](https://support.packet.com/kb/articles/api-integrations)

```bash
- vi cluster.tf
+ vi cluster.tfvars
```
* cluster_name = alpha
* packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456
@@ -84,7 +84,7 @@ terraform init ../../contrib/terraform/packet/
Run Terraform to deploy the hardware.

```bash
- terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
+ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
```

## Run Kubespray Playbooks
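As in the Packet README section above, the deployment then proceeds with Ansible; a minimal sketch of the commands it documents (inventory path assumed):

```bash
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```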
