Adds tweaks and formatting
jlerche committed May 15, 2019
1 parent b06cfda commit 34ee7a5
Showing 6 changed files with 106 additions and 87 deletions.
4 changes: 2 additions & 2 deletions deploy/gcp/README.md
@@ -45,7 +45,7 @@ gcloud services enable container.googleapis.com
Now we can launch the script:

```bash
-git clone https://github.com/pingcap/tidb-operator
+git clone --depth=1 https://github.com/pingcap/tidb-operator
cd tidb-operator/deploy/gcp
terraform init
terraform apply
@@ -82,7 +82,7 @@ To upgrade TiDB cluster, modify `tidb_version` variable to a higher version in v
## Scale TiDB cluster

-To scale TiDB cluster, modify `tikv_count` or `tidb_count` to your desired count, and then run `terraform apply`.
+To scale TiDB cluster, modify `tikv_count`, `tikv_replica_count`, `tidb_count`, and `tidb_replica_count` to your desired count, and then run `terraform apply`.

> *Note*: Currently, scaling in is not supported since we cannot determine which node to scale. Scaling out needs a few minutes to complete, you can watch the scaling out by `watch kubectl --kubeconfig credentials/kubeconfig_<cluster_name> get po -n tidb`
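For instance, a scale-out could look like the following; this is a sketch that assumes you prefer passing `-var` overrides on the command line instead of editing `variables.tf` (the variable names themselves come from this module):

```bash
# Scale TiKV out to 5 nodes and bump replicas to match (illustrative values)
terraform apply -var tikv_count=5 -var tikv_replica_count=5

# Watch the new pods come up
watch kubectl --kubeconfig credentials/kubeconfig_<cluster_name> get po -n tidb
```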
9 changes: 5 additions & 4 deletions deploy/gcp/data.tf
@@ -1,10 +1,11 @@
data "template_file" "tidb_cluster_values" {
template = "${file("${path.module}/templates/tidb-cluster-values.yaml.tpl")}"
vars {

vars {
cluster_version = "${var.tidb_version}"
pd_replicas = "${var.pd_replica_count}"
tikv_replicas = "${var.tikv_replica_count}"
tidb_replicas = "${var.tidb_replica_count}"
pd_replicas = "${var.pd_replica_count}"
tikv_replicas = "${var.tikv_replica_count}"
tidb_replicas = "${var.tidb_replica_count}"
}
}
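The `vars` above are interpolated into `templates/tidb-cluster-values.yaml.tpl`, and the locals in `main.tf` write the result to `rendered/tidb-cluster-values.yaml`. As a sketch, a quick post-apply sanity check on the rendered replica counts might be:

```bash
# Assumes you are in deploy/gcp after a successful `terraform apply`
grep -n 'replicas' rendered/tidb-cluster-values.yaml
```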

164 changes: 89 additions & 75 deletions deploy/gcp/main.tf
@@ -4,20 +4,20 @@ variable "GCP_PROJECT" {}

provider "google" {
credentials = "${file("${var.GCP_CREDENTIALS_PATH}")}"
region = "${var.GCP_REGION}"
project = "${var.GCP_PROJECT}"
region = "${var.GCP_REGION}"
project = "${var.GCP_PROJECT}"
}

// required for taints on node pools
provider "google-beta" {
  credentials = "${file("${var.GCP_CREDENTIALS_PATH}")}"
-  region = "${var.GCP_REGION}"
-  project = "${var.GCP_PROJECT}"
+  region      = "${var.GCP_REGION}"
+  project     = "${var.GCP_PROJECT}"
}

locals {
-  credential_path = "${path.module}/credentials"
-  kubeconfig = "${local.credential_path}/kubeconfig_${var.cluster_name}"
+  credential_path          = "${path.module}/credentials"
+  kubeconfig               = "${local.credential_path}/kubeconfig_${var.cluster_name}"
  tidb_cluster_values_path = "${path.module}/rendered/tidb-cluster-values.yaml"
}

@@ -28,39 +28,41 @@ resource "null_resource" "prepare-dir" {
}

resource "google_compute_network" "vpc_network" {
name = "vpc-network"
name = "vpc-network"
auto_create_subnetworks = false
project = "${var.GCP_PROJECT}"
project = "${var.GCP_PROJECT}"
}

resource "google_compute_subnetwork" "private_subnet" {
ip_cidr_range = "172.31.252.0/22"
name = "private-subnet"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"
name = "private-subnet"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"

secondary_ip_range {
ip_cidr_range = "172.30.0.0/16"
range_name = "pods-${var.GCP_REGION}"
range_name = "pods-${var.GCP_REGION}"
}

secondary_ip_range {
ip_cidr_range = "172.31.224.0/20"
range_name = "services-${var.GCP_REGION}"
range_name = "services-${var.GCP_REGION}"
}
}

resource "google_compute_subnetwork" "public_subnet" {
ip_cidr_range = "172.29.252.0/22"
name = "public-subnet"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"
name = "public-subnet"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"
}

resource "google_container_cluster" "cluster" {
name = "${var.cluster_name}"
network = "${google_compute_network.vpc_network.self_link}"
name = "${var.cluster_name}"
network = "${google_compute_network.vpc_network.self_link}"
subnetwork = "${google_compute_subnetwork.private_subnet.self_link}"
location = "${var.GCP_REGION}"
project = "${var.GCP_PROJECT}"
location = "${var.GCP_REGION}"
project = "${var.GCP_PROJECT}"

master_auth {
username = ""
@@ -78,180 +78,191 @@ resource "google_container_cluster" "cluster" {
}

  remove_default_node_pool = true
-  initial_node_count = 1
+  initial_node_count       = 1

min_master_version = "latest"
}
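Since `min_master_version = "latest"` picks up whatever GKE currently publishes, it can be worth checking the available versions before applying. A sketch using the standard gcloud query (the region placeholder is yours to fill in):

```bash
gcloud container get-server-config --region <your-region>
```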


resource "google_container_node_pool" "pd_pool" {
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "pd-pool"
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "pd-pool"
initial_node_count = "${var.pd_count}"

node_config {
machine_type = "${var.pd_instance_type}"
machine_type = "${var.pd_instance_type}"
local_ssd_count = 1

taint {
effect = "NO_SCHEDULE"
key = "dedicated"
value = "pd"
key = "dedicated"
value = "pd"
}

labels {
dedicated = "pd"
}
tags = ["pd"]

tags = ["pd"]
oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
}

}
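The `dedicated=pd:NO_SCHEDULE` taint plus the matching `dedicated` label keep general workloads off the PD nodes; only pods whose spec tolerates the taint are scheduled there. A sketch for verifying this once the cluster is up (the kubeconfig path follows the pattern used elsewhere in this module):

```bash
kubectl --kubeconfig credentials/kubeconfig_<cluster_name> get nodes -l dedicated=pd \
  -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
```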

resource "google_container_node_pool" "tikv_pool" {
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "tikv-pool"
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "tikv-pool"
initial_node_count = "${var.tikv_count}"

node_config {
machine_type = "${var.tikv_instance_type}"
machine_type = "${var.tikv_instance_type}"
local_ssd_count = 1

taint {
effect = "NO_SCHEDULE"
key = "dedicated"
value = "tikv"
key = "dedicated"
value = "tikv"
}

labels {
dedicated = "tikv"
}
tags = ["tikv"]
oauth_scopes = ["storage-ro", "logging-write", "monitoring"]

tags = ["tikv"]
oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
}

}

resource "google_container_node_pool" "tidb_pool" {
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "tidb-pool"
provider = "google-beta"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "tidb-pool"
initial_node_count = "${var.tidb_count}"

node_config {
machine_type = "${var.tidb_instance_type}"

taint {
effect = "NO_SCHEDULE"
key = "dedicated"
value = "tidb"
key = "dedicated"
value = "tidb"
}

labels {
dedicated = "tidb"
}
tags = ["tidb"]

tags = ["tidb"]
oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
}

}

resource "google_container_node_pool" "monitor_pool" {
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "monitor-pool"
project = "${var.GCP_PROJECT}"
cluster = "${google_container_cluster.cluster.name}"
location = "${google_container_cluster.cluster.location}"
name = "monitor-pool"
initial_node_count = "1"

node_config {
machine_type = "${var.monitor_instance_type}"
tags = ["monitor"]
tags = ["monitor"]
oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
}

}

resource "google_compute_firewall" "allow_ssh_bastion" {
name = "allow-ssh-bastion"
name = "allow-ssh-bastion"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"

allow {
protocol = "tcp"
ports = ["22"]
ports = ["22"]
}

source_ranges = ["0.0.0.0/0"]
target_tags = ["bastion"]
target_tags = ["bastion"]
}

resource "google_compute_firewall" "allow_mysql_from_bastion" {
name = "allow-mysql-from-bastion"
name = "allow-mysql-from-bastion"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"

allow {
protocol = "tcp"
ports = ["4000"]
ports = ["4000"]
}

source_tags = ["bastion"]
target_tags = ["tidb"]
}

resource "google_compute_firewall" "allow_ssh_from_bastion" {
name = "allow-ssh-from-bastion"
name = "allow-ssh-from-bastion"
network = "${google_compute_network.vpc_network.self_link}"
project = "${var.GCP_PROJECT}"

allow {
protocol = "tcp"
ports = ["22"]
ports = ["22"]
}

source_tags = ["bastion"]
target_tags = ["tidb", "tikv", "pd", "monitor"]
}

resource "google_compute_instance" "bastion" {
project = "${var.GCP_PROJECT}"
zone = "${var.GCP_REGION}-a"
project = "${var.GCP_PROJECT}"
zone = "${var.GCP_REGION}-a"
machine_type = "${var.bastion_instance_type}"
name = "bastion"
name = "bastion"

"boot_disk" {
initialize_params {
image = "ubuntu-os-cloud/ubuntu-1804-lts"
}
}

"network_interface" {
subnetwork = "${google_compute_subnetwork.public_subnet.self_link}"
access_config {}
subnetwork = "${google_compute_subnetwork.public_subnet.self_link}"
access_config = {}
}

tags = ["bastion"]

metadata_startup_script = "sudo apt-get install -y mysql-client && curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.rpm.sh | bash && sudo apt-get -y install sysbench"
metadata_startup_script = "sudo apt-get install -y mysql-client && curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | bash && sudo apt-get -y install sysbench"
}
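The startup-script change is a real fix, not just formatting: the bastion image is Ubuntu 18.04, so sysbench has to come from packagecloud's `script.deb.sh` installer rather than `script.rpm.sh`. Once provisioned, a session through the bastion might look like this sketch (the TiDB service IP is a placeholder you would look up in-cluster):

```bash
# SSH to the bastion (port 22 is opened by allow_ssh_bastion)
gcloud compute ssh bastion --zone <your-region>-a

# From the bastion, reach TiDB on port 4000 (opened by allow_mysql_from_bastion)
mysql -h <tidb-service-ip> -P 4000 -u root
```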

resource "null_resource" "get-credentials" {
provisioner "local-exec" {
command = "gcloud container clusters get-credentials ${google_container_cluster.cluster.name} --region ${var.GCP_REGION}"

environment {
KUBECONFIG= "${local.kubeconfig}"
KUBECONFIG = "${local.kubeconfig}"
}
}
}
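Because `KUBECONFIG` points at `credentials/kubeconfig_<cluster_name>`, the fetched credentials never touch your default `~/.kube/config`; later commands just reference the same file, for example:

```bash
kubectl --kubeconfig credentials/kubeconfig_<cluster_name> get nodes
```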

resource "local_file" "tidb-cluster-values" {
depends_on = ["data.template_file.tidb_cluster_values"]
filename = "${local.tidb_cluster_values_path}"
content = "${data.template_file.tidb_cluster_values.rendered}"
filename = "${local.tidb_cluster_values_path}"
content = "${data.template_file.tidb_cluster_values.rendered}"
}

resource "null_resource" "setup-env" {
depends_on = ["google_container_cluster.cluster", "null_resource.get-credentials"]

provisioner "local-exec" {
working_dir = "${path.module}"

command = <<EOS
kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $$(gcloud config get-value account)
kubectl create serviceaccount --namespace kube-system tiller
Expand All @@ -265,8 +278,9 @@ until helm ls; do
done
helm install --namespace tidb-admin --name tidb-operator ${path.module}/charts/tidb-operator
EOS

environment {
KUBECONFIG= "${local.kubeconfig}"
KUBECONFIG = "${local.kubeconfig}"
}
}
}
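When `terraform apply` finishes, tidb-operator should be running in the `tidb-admin` namespace created by the `helm install` above. A quick verification sketch:

```bash
export KUBECONFIG=credentials/kubeconfig_<cluster_name>
kubectl get pods -n tidb-admin
helm ls
```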
4 changes: 4 additions & 0 deletions deploy/gcp/outputs.tf
@@ -6,6 +6,10 @@ output "cluster_id" {
value = "${google_container_cluster.cluster.id}"
}

output "cluster_name" {
value = "${google_container_cluster.cluster.name}"
}

output "kubeconfig_file" {
value = "${local.kubeconfig}"
}
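The new `cluster_name` output is handy for scripting against the cluster without hard-coding its name, for example (region placeholder assumed):

```bash
gcloud container clusters get-credentials $(terraform output cluster_name) --region <your-region>
```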
2 changes: 1 addition & 1 deletion deploy/gcp/templates/tidb-cluster-values.yaml.tpl
@@ -30,7 +30,7 @@ services:
type: ClusterIP

discovery:
-  image: pingcap/tidb-operator:latest
+  image: pingcap/tidb-operator:v1.0.0-beta.2
imagePullPolicy: IfNotPresent
resources:
limits: