Skip to content
This repository has been archived by the owner on Jun 24, 2021. It is now read-only.

Update drainer config #745

Merged
merged 3 commits into from
Apr 30, 2019
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 29 additions & 18 deletions conf/drainer-cluster.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,15 @@
# the interval (in seconds) at which pumps' status is checked
detect-interval = 10

# Use the specified compressor to compress payload between pump and drainer
liubo0127 marked this conversation as resolved.
Show resolved Hide resolved
# compressor = "gzip"

# syncer Configuration.
[syncer]
# Assume the upstream sql-mode.
# If this is set, the same sql-mode is used to parse DDL statements, and the same sql-mode is set at the downstream when db-type is mysql.
liubo0127 marked this conversation as resolved.
Show resolved Hide resolved
# If this is not set, no sql-mode is applied.
liubo0127 marked this conversation as resolved.
Show resolved Hide resolved
# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"

# disable sync these schema
ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
Expand All @@ -17,26 +24,33 @@ txn-batch = 20
# to get higher throughput by higher concurrent write to the downstream
worker-count = 16

# whether to disable the SQL feature of splitting a single binlog file. If it is set to "true",
liubo0127 marked this conversation as resolved.
Show resolved Hide resolved
# each binlog file is restored to a single transaction for synchronization based on the order of binlogs.
liubo0127 marked this conversation as resolved.
Show resolved Hide resolved
# If the downstream service is MySQL, set it to "false".
disable-dispatch = false

# safe mode will split update to delete and insert
safe-mode = false

# downstream storage, equal to --dest-db-type
# valid values are "mysql", "pb", "tidb", "flash", "kafka"
# valid values are "mysql", "file", "tidb", "flash", "kafka"
db-type = "mysql"

##replicate-do-db priority over replicate-do-table if have same db name
##and we support regex expression , start with '~' declare use regex expression.
#
#replicate-do-db = ["~^b.*","s1"]
#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "log"
# replicate-do-db has priority over replicate-do-table if they share the same db name.
# Regular expressions are supported: a value starting with '~' is treated as a regular expression.
# replicate-do-db = ["~^b.*","s1"]
# [[syncer.replicate-do-table]]
# db-name ="test"
# tbl-name = "log"

# [[syncer.replicate-do-table]]
# db-name ="test"
# tbl-name = "~^a.*"

#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "~^a.*"
# disable sync these table
# [[syncer.ignore-table]]
# db-name = "test"
# tbl-name = "log"

# the downstream mysql protocol database
[syncer.to]
Expand All @@ -45,15 +59,12 @@ user = "root"
password = ""
port = 3306

# Uncomment this if you want to use pb or sql as db-type.
# Compress compresses output file, like pb and sql file. Now it supports "gzip" algorithm only.
# Values can be "gzip". Leave it empty to disable compression.
#[syncer.to]
#dir = "data.drainer"
#compression = "gzip"
# Uncomment this if you want to use file as db-type.
# [syncer.to]
# dir = "data.drainer"

# When db-type is kafka, you can uncomment this to configure the downstream kafka; it serves as the global default kafka configuration.
#[syncer.to]
# [syncer.to]
# Only one of zookeeper-addrs and kafka-addrs needs to be configured; the kafka address is resolved automatically if zookeeper-addrs is configured.
# zookeeper-addrs = "127.0.0.1:2181"
# kafka-addrs = "127.0.0.1:9092"
Expand Down
7 changes: 2 additions & 5 deletions conf/drainer-kafka.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ disable-dispatch = false
safe-mode = false

# downstream storage, equal to --dest-db-type
# valid values are "mysql", "pb", "tidb", "flash", "kafka"
# valid values are "mysql", "file", "tidb", "flash", "kafka"
db-type = "mysql"

##replicate-do-db priority over replicate-do-table if have same db name
Expand All @@ -45,12 +45,9 @@ user = "root"
password = ""
port = 3306

# Uncomment this if you want to use pb or sql as db-type.
# Compress compresses output file, like pb and sql file. Now it supports "gzip" algorithm only.
# Values can be "gzip". Leave it empty to disable compression.
# Uncomment this if you want to use file as db-type.
#[syncer.to]
#dir = "data.drainer"
#compression = "gzip"

# When db-type is kafka, you can uncomment this to configure the downstream kafka; it serves as the global default kafka configuration.
#[syncer.to]
Expand Down
4 changes: 4 additions & 0 deletions conf/pump-cluster.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,7 @@ security:

# Path of file that contains X509 key in PEM format for connection with cluster components.
# ssl-key: "/path/to/drainer-key.pem"

storage:
# Set to true (by default) to guarantee reliability by ensuring binlog data is flushed to the disk.
# sync-log: true
3 changes: 3 additions & 0 deletions group_vars/all.yml
Original file line number Diff line number Diff line change
Expand Up @@ -49,5 +49,8 @@ pump_cert_dir: "{{ deploy_dir }}/conf/ssl"
# pump binlog to pump sock
pump_socket: "{{ status_dir }}/pump.sock"

# binlog version, "kafka" or "cluster":
binlog_version: "cluster"

# drainer
drainer_port: 8249
3 changes: 0 additions & 3 deletions inventory.ini
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,6 @@ set_hostname = False
## binlog trigger
enable_binlog = False

# binlog version, "kafka" or "cluster":
binlog_version = "cluster"

# kafka cluster address for monitoring, example:
# kafka_addrs = "192.168.0.11:9092,192.168.0.12:9092,192.168.0.13:9092"
kafka_addrs = ""
Expand Down
5 changes: 5 additions & 0 deletions roles/pump_cluster/templates/pump.toml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,8 @@
{% for item, value in pump_conf.security | dictsort -%}
{{ item }} = {{ value | to_json }}
{% endfor %}

[storage]
{% for item, value in pump_conf.storage | dictsort -%}
{{ item }} = {{ value | to_json }}
{% endfor %}
4 changes: 4 additions & 0 deletions roles/pump_cluster/vars/default.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,7 @@ security:

# Path of file that contains X509 key in PEM format for connection with cluster components.
ssl-key: ""

storage:
# Set to true (by default) to guarantee reliability by ensuring binlog data is flushed to the disk.
# sync-log: true