create tidb cluster on ack with cr (#2012)
* create tidb cluster on ack with cr

* update variable name

* update variable name

* update default replicas
DanielZhangQD authored Mar 24, 2020
1 parent f66627c commit df048e2
Showing 7 changed files with 226 additions and 13 deletions.
deploy/aliyun/main.tf (22 changes: 11 additions & 11 deletions)
@@ -71,17 +71,17 @@ module "tidb-cluster" {
     helm = helm.default
   }

-  cluster_name = "my-cluster"
+  cluster_name = var.tidb_cluster_name
   ack          = module.tidb-operator

-  tidb_version               = var.tidb_version
-  tidb_cluster_chart_version = var.tidb_cluster_chart_version
-  pd_instance_type           = var.pd_instance_type
-  pd_count                   = var.pd_count
-  tikv_instance_type         = var.tikv_instance_type
-  tikv_count                 = var.tikv_count
-  tidb_instance_type         = var.tidb_instance_type
-  tidb_count                 = var.tidb_count
-  monitor_instance_type      = var.monitor_instance_type
-  override_values            = file("my-cluster.yaml")
+  tidb_version                = var.tidb_version
+  tidb_cluster_chart_version  = var.tidb_cluster_chart_version
+  pd_instance_type            = var.pd_instance_type
+  pd_count                    = var.pd_count
+  tikv_instance_type          = var.tikv_instance_type
+  tikv_count                  = var.tikv_count
+  tidb_instance_type          = var.tidb_instance_type
+  tidb_count                  = var.tidb_count
+  monitor_instance_type       = var.monitor_instance_type
+  create_tidb_cluster_release = var.create_tidb_cluster_release
 }
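With this change the module takes the cluster name from var.tidb_cluster_name and only installs the tidb-cluster Helm release when create_tidb_cluster_release is true; leaving it at the new default of false hands cluster creation over to the CR manifests added below. A minimal sketch of overriding both variables at apply time with the standard Terraform CLI (the cluster name shown is illustrative):

    # Sketch: skip the Helm release so the cluster is managed through CRs.
    terraform init
    terraform apply \
      -var tidb_cluster_name=my-cluster \
      -var create_tidb_cluster_release=false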
deploy/aliyun/manifests/db-monitor.yaml.example (86 changes: 86 additions & 0 deletions)
@@ -0,0 +1,86 @@
apiVersion: pingcap.com/v1alpha1
kind: TidbMonitor
metadata:
  name: TIDB_CLUSTER_NAME
spec:
  alertmanagerURL: ""
  annotations: {}
  clusters:
  - name: TIDB_CLUSTER_NAME
  grafana:
    baseImage: grafana/grafana
    envs:
      # Configure Grafana through environment variables, except GF_PATHS_DATA,
      # GF_SECURITY_ADMIN_USER, and GF_SECURITY_ADMIN_PASSWORD.
      # Ref: https://grafana.com/docs/installation/configuration/#using-environment-variables
      GF_AUTH_ANONYMOUS_ENABLED: "true"
      GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
      GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
      # If Grafana is running behind a reverse proxy with a subpath, e.g. http://foo.bar/grafana:
      # GF_SERVER_DOMAIN: foo.bar
      # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
    imagePullPolicy: IfNotPresent
    logLevel: info
    password: admin
    resources: {}
    # limits:
    #   cpu: 8000m
    #   memory: 8Gi
    # requests:
    #   cpu: 4000m
    #   memory: 4Gi
    service:
      portName: http-grafana
      type: LoadBalancer
      annotations:
        service.beta.kubernetes.io/alicloud-loadbalancer-address-type: internet
    username: admin
    version: 6.0.1
  imagePullPolicy: IfNotPresent
  initializer:
    baseImage: pingcap/tidb-monitor-initializer
    imagePullPolicy: IfNotPresent
    resources: {}
    # limits:
    #   cpu: 50m
    #   memory: 64Mi
    # requests:
    #   cpu: 50m
    #   memory: 64Mi
    version: v3.0.12
  kubePrometheusURL: ""
  nodeSelector: {}
  persistent: true
  prometheus:
    baseImage: prom/prometheus
    imagePullPolicy: IfNotPresent
    logLevel: info
    reserveDays: 12
    resources: {}
    # limits:
    #   cpu: 8000m
    #   memory: 8Gi
    # requests:
    #   cpu: 4000m
    #   memory: 4Gi
    service:
      portName: http-prometheus
      type: NodePort
    version: v2.11.1
  reloader:
    baseImage: pingcap/tidb-monitor-reloader
    imagePullPolicy: IfNotPresent
    resources: {}
    # limits:
    #   cpu: 50m
    #   memory: 64Mi
    # requests:
    #   cpu: 50m
    #   memory: 64Mi
    service:
      portName: tcp-reloader
      type: NodePort
    version: v1.0.1
  storage: 100Gi
  storageClassName: alicloud-disk-available
  tolerations: []
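Because the Grafana service above is a LoadBalancer with the internet address-type annotation, Alibaba Cloud provisions a public SLB once the TidbMonitor is created. A quick lookup sketch, under the assumption that the operator names the service TIDB_CLUSTER_NAME-grafana (the name and namespace below are illustrative):

    # Sketch: look up the public address of the Grafana SLB.
    kubectl get svc my-cluster-grafana -n my-cluster \
      -o jsonpath='{.status.loadBalancer.ingress[0].ip}'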

deploy/aliyun/manifests/db.yaml.example (110 changes: 110 additions & 0 deletions)
@@ -0,0 +1,110 @@
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: TIDB_CLUSTER_NAME
spec:
  configUpdateStrategy: RollingUpdate
  enableTLSCluster: false
  helper:
    image: busybox:1.31.1
  hostNetwork: false
  imagePullPolicy: IfNotPresent
  pd:
    affinity: {}
    baseImage: pingcap/pd
    config:
      log:
        level: info
    nodeSelector:
      dedicated: TIDB_CLUSTER_NAME-pd
    podSecurityContext: {}
    replicas: 3
    requests:
      cpu: "1"
      memory: 400Mi
      storage: 20Gi
    storageClassName: alicloud-disk
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: TIDB_CLUSTER_NAME-pd
  pvReclaimPolicy: Retain
  schedulerName: tidb-scheduler
  tidb:
    affinity: {}
    annotations:
      tidb.pingcap.com/sysctl-init: "true"
    baseImage: pingcap/tidb
    config:
      log:
        level: info
      performance:
        max-procs: 0
        tcp-keep-alive: true
    enableTLSClient: false
    maxFailoverCount: 3
    nodeSelector:
      dedicated: TIDB_CLUSTER_NAME-tidb
    podSecurityContext:
      sysctls:
      - name: net.ipv4.tcp_keepalive_time
        value: "300"
      - name: net.ipv4.tcp_keepalive_intvl
        value: "75"
      - name: net.core.somaxconn
        value: "32768"
    replicas: 2
    requests:
      cpu: "1"
      memory: 400Mi
    separateSlowLog: true
    service:
      annotations:
        service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet
        service.beta.kubernetes.io/alicloud-loadbalancer-slb-network-type: vpc
      exposeStatus: true
      externalTrafficPolicy: Local
      type: LoadBalancer
    slowLogTailer:
      limits:
        cpu: 100m
        memory: 50Mi
      requests:
        cpu: 20m
        memory: 5Mi
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: TIDB_CLUSTER_NAME-tidb
  tikv:
    affinity: {}
    annotations:
      tidb.pingcap.com/sysctl-init: "true"
    baseImage: pingcap/tikv
    config:
      log-level: info
    hostNetwork: false
    maxFailoverCount: 3
    nodeSelector:
      dedicated: TIDB_CLUSTER_NAME-tikv
    podSecurityContext:
      sysctls:
      - name: net.core.somaxconn
        value: "32768"
    privileged: false
    replicas: 3
    requests:
      cpu: "1"
      memory: 2Gi
      storage: 100Gi
    storageClassName: local-volume
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: TIDB_CLUSTER_NAME-tikv
  timezone: UTC
  version: v3.0.12
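Both example manifests use TIDB_CLUSTER_NAME as a placeholder. A minimal sketch of rendering and applying them after terraform apply finishes, assuming kubectl is pointed at the new ACK cluster and TiDB Operator v1.1.0 (which supports this CR-based workflow) is installed; using the cluster name as the namespace is purely illustrative:

    # Sketch: substitute the placeholder and create both CRs.
    CLUSTER_NAME=my-cluster                      # illustrative name
    kubectl create namespace ${CLUSTER_NAME}     # skip if it already exists
    sed "s/TIDB_CLUSTER_NAME/${CLUSTER_NAME}/g" deploy/aliyun/manifests/db.yaml.example \
      | kubectl apply -n ${CLUSTER_NAME} -f -
    sed "s/TIDB_CLUSTER_NAME/${CLUSTER_NAME}/g" deploy/aliyun/manifests/db-monitor.yaml.example \
      | kubectl apply -n ${CLUSTER_NAME} -f -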

deploy/aliyun/variables.tf (12 changes: 11 additions & 1 deletion)
@@ -10,7 +10,7 @@ variable "bastion_cpu_core_count" {
 
 variable "operator_version" {
   type    = string
-  default = "v1.0.6"
+  default = "v1.1.0"
 }
 
 variable "operator_helm_values" {
@@ -112,3 +112,13 @@ variable "vpc_cidr" {
   description = "VPC cidr_block, options: [192.168.0.0/16, 172.16.0.0/16, 10.0.0.0/8]; must not collide with the Kubernetes service CIDR or pod CIDR. Cannot be changed once the VPC is created."
   default     = "192.168.0.0/16"
 }
+
+variable "create_tidb_cluster_release" {
+  description = "Whether to create the tidb-cluster Helm release"
+  default     = false
+}
+
+variable "tidb_cluster_name" {
+  description = "The TiDB cluster name"
+  default     = "my-cluster"
+}
deploy/modules/aliyun/tidb-cluster/main.tf (1 change: 1 addition & 0 deletions)
@@ -12,4 +12,5 @@ module "tidb-cluster" {
   base_values         = file("${path.module}/values/default.yaml")
   kubeconfig_filename = var.ack.kubeconfig_filename
   service_ingress_key = "ip"
+  create              = var.create_tidb_cluster_release
 }
deploy/modules/aliyun/tidb-cluster/variables.tf (5 changes: 5 additions & 0 deletions)
@@ -65,3 +65,8 @@ variable "local_exec_interpreter" {
   type    = list(string)
   default = ["/bin/sh", "-c"]
 }
+
+variable "create_tidb_cluster_release" {
+  description = "Whether to create the tidb-cluster Helm release"
+  default     = false
+}
deploy/modules/aliyun/tidb-operator/operator.tf (3 changes: 2 additions & 1 deletion)
@@ -25,10 +25,11 @@ resource "null_resource" "setup-env" {
   # it manually and the resource namespace & name are hard-coded by convention
   command = <<EOS
 kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/crd.yaml
+kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/tiller-rbac.yaml
 kubectl apply -f ${path.module}/manifest/alicloud-disk-storageclass.yaml
 echo '${data.template_file.local-volume-provisioner.rendered}' | kubectl apply -f -
 kubectl patch -n kube-system daemonset flexvolume --type='json' -p='[{"op":"replace", "path": "/spec/template/spec/tolerations", "value":[{"operator": "Exists"}]}]'
-helm init
+helm init --upgrade --tiller-image registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:$(helm version --client --short | grep -Eo 'v[0-9]\.[0-9]+\.[0-9]+') --service-account tiller
 until helm ls; do
   echo "Wait tiller ready"
 done
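The replacement helm init pins the Tiller image to the Aliyun mirror whose tag matches the local Helm 2 client (the grep extracts the client version from helm version --client --short) and runs it under the tiller service account created by tiller-rbac.yaml, avoiding both gcr.io pulls and client/server version skew. A sanity-check sketch after provisioning, using only stock Helm 2 and kubectl commands:

    # Sketch: confirm Tiller is running and matches the client version.
    helm version --short
    kubectl get pods -n kube-system -l app=helm,name=tiller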
