forked from confluentinc/cp-ansible
-
Notifications
You must be signed in to change notification settings - Fork 0
/
hosts_example.yml
354 lines (328 loc) · 16.3 KB
/
hosts_example.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
all:
  vars:
    ansible_connection: ssh
    ansible_user: ec2-user
    ansible_become: true
    ansible_ssh_private_key_file: /tmp/certs/ssh_priv.pem

    #### Setting Proxy Environment variables ####
    ## To set proxy env vars for the duration of playbook run, uncomment below block and set as necessary
    # proxy_env:
    #   http_proxy: http://proxy.example.com:8080
    #   https_proxy: http://proxy.example.com:8080
    #   ## no_proxy is a comma-separated list of hosts/domains to bypass, not a URL
    #   no_proxy: localhost,127.0.0.1,.example.com

    #### SASL Authentication Configuration ####
    ## By default there will be no SASL Authentication
    ## For SASL/PLAIN uncomment this line:
    # sasl_protocol: plain
    ## For SASL/SCRAM uncomment this line:
    # sasl_protocol: scram
    ## For SASL/GSSAPI uncomment this line and see Kerberos Configuration properties below
    # sasl_protocol: kerberos

    #### Kerberos Configuration ####
    ## Applicable when sasl_protocol is kerberos
    # kerberos_kafka_broker_primary: <Name of the primary set on the kafka brokers' principal eg. kafka>
    ## REQUIRED: Under each host set keytab file path and principal name, see below
    # kerberos_configure: <Boolean for ansible to install kerberos packages and configure this file: /etc/krb5.conf, defaults to true>
    # kerberos:
    #   realm: <KDC server realm eg. confluent.example.com>
    #   kdc_hostname: <hostname of machine with KDC running eg. ip-172-31-45-82.us-east-2.compute.internal>
    #   admin_hostname: <hostname of machine with KDC running eg. ip-172-31-45-82.us-east-2.compute.internal>

    #### TLS Configuration ####
    ## By default, data will NOT be encrypted. To turn on TLS encryption, uncomment this line
    # ssl_enabled: true
    ## By default, the components will be configured with One-Way TLS, to turn on TLS mutual auth, uncomment this line:
    # ssl_mutual_auth_enabled: true
    ## By default, the certs for this configuration will be self signed, to deploy custom certificates there are two options.
    ## Option 1: Custom Certs
    ## You will need to provide the path to the Certificate Authority Cert used to sign each hosts' certs
    ## As well as the signed certificate path and the key for that certificate for each host.
    ## These will need to be set for the correct host
    # ssl_custom_certs: true
    # ssl_ca_cert_filepath: "/tmp/certs/ca.crt"
    # ssl_signed_cert_filepath: "/tmp/certs/{{inventory_hostname}}-signed.crt"
    # ssl_key_filepath: "/tmp/certs/{{inventory_hostname}}-key.pem"
    # ssl_key_password: <password for key for each host, will be inputting in the form -passin pass:{{ssl_key_password}} >
    ## Option 2: Custom Keystores and Truststores
    ## CP-Ansible can move keystores/truststores to their corresponding hosts and configure the components to use them. Set These vars
    # ssl_provided_keystore_and_truststore: true
    # ssl_keystore_filepath: "/tmp/certs/{{inventory_hostname}}-keystore.jks"
    # ssl_keystore_key_password: mystorepassword
    # ssl_keystore_store_password: mystorepassword
    # ssl_truststore_filepath: "/tmp/certs/truststore.jks"
    # ssl_truststore_password: truststorepass

    #### Certificate Regeneration ####
    ## When using self signed certificates, each playbook run will regenerate the CA, to turn this off, uncomment this line:
    # regenerate_ca: false
    ## By default, if keystore/truststore files exists for a component, the playbook will not recreate them
    ## To recreate the keystores and truststores uncomment this line:
    # regenerate_keystore_and_truststore: true

    #### Monitoring Configuration ####
    ## Jolokia is enabled by default. The Jolokia jar gets pulled from the internet and enabled on all the components
    ## To disable, uncomment this line:
    # jolokia_enabled: false
    ## During setup, the hosts will download the jolokia agent jar from Maven. To update that jar download set this var
    # jolokia_jar_url: http://<internal-server>/jolokia-jvm-1.6.2-agent.jar
    ## JMX Exporter is disabled by default. When enabled, JMX Exporter jar will be pulled from the Internet and enabled on the broker *only*.
    ## To enable, uncomment this line:
    # jmxexporter_enabled: true
    ## To update that jar download set this var
    # jmxexporter_jar_url: http://<internal-server>/jmx_prometheus_javaagent-0.12.0.jar

    #### Custom Yum Repo File (Rhel/Centos) ####
    ## If you are using your own yum repo server to host the packages, in the case of an air-gapped environment,
    ## use the below variables to distribute a custom .repo file to the hosts and skip our repo setup.
    ## Note, your repo server must host all confluent packages
    # custom_yum_repofile: true
    # custom_yum_repofile_filepath: /tmp/my-repo.repo

    #### Custom Apt Repo File (Ubuntu/Debian) ####
    ## If you are using your own apt repo server to host the packages, in the case of an air-gapped environment,
    ## use the below variables to distribute a custom .repo file to the hosts and skip our repo setup.
    ## Note, your repo server must host all confluent packages
    # custom_apt_repo: true
    # custom_apt_repo_filepath: "/tmp/my-source.list"

    #### Confluent Server vs Confluent Kafka ####
    ## Confluent Server will be installed by default, to install confluent-kafka instead, uncomment the below
    # confluent_server_enabled: false

    #### Schema Validation ####
    ## Schema Validation with the kafka configuration is disabled by default. To enable uncomment this line:
    ## Schema Validation only works with confluent_server_enabled: true
    # kafka_broker_schema_validation_enabled: true

    #### Fips Security ####
    ## To enable Fips for added security, uncomment the below line.
    ## Fips only works with ssl_enabled: true and confluent_server_enabled: true
    # fips_enabled: true

    #### Configuring Different Security on both Listeners ####
    ## CP-Ansible will configure two listeners on the broker: an internal listener for the broker to communicate and an external for the components and other clients.
    ## If you only need one listener uncomment this line:
    # kafka_broker_configure_additional_brokers: false
    ## By default both of these listeners will follow whatever you set for ssl_enabled and sasl_protocol.
    ## To configure different security settings on the internal and external listeners set the following variables:
    # kafka_broker_custom_listeners:
    #   internal:
    #     name: INTERNAL
    #     port: 9091
    #     ssl_enabled: false
    #     ssl_mutual_auth_enabled: false
    #     sasl_protocol: none
    #   external:
    #     name: EXTERNAL
    #     port: 9092
    #     ssl_enabled: true
    #     ssl_mutual_auth_enabled: false
    #     sasl_protocol: scram
    #   ## You can even add additional listeners, make sure all variables are set
    #   client_listener:
    #     name: CLIENT
    #     port: 9093
    #     ssl_enabled: true
    #     ssl_mutual_auth_enabled: true
    #     sasl_protocol: scram

    #### Creating Connectors ####
    ## To manage the connector configs from Ansible, set the following list of connector objects:
    ## one per connector, must have `name` and `config` properties
    ## make sure to provide the numeric values as strings
    # kafka_connect_connectors:
    #   - name: sample-connector
    #     config:
    #       connector.class: "FileStreamSinkConnector"
    #       tasks.max: "1"
    #       file: "path/to/file.txt"
    #       topics: "test_topic"

    ## To set custom properties for each service
    ## Find property options in the Confluent Documentation
    # zookeeper:
    #   properties:
    #     initLimit: 6
    #     syncLimit: 3
    # kafka_broker:
    #   properties:
    #     num.io.threads: 15
    # schema_registry:
    #   properties:
    #     key: val
    # control_center:
    #   properties:
    #     key: val
    # kafka_connect:
    #   properties:
    #     key: val
    # kafka_rest:
    #   properties:
    #     key: val
    # ksql:
    #   properties:
    #     key: val
zookeeper:
  ## To configure Zookeeper to run as a custom user, uncomment below
  # vars:
  #   zookeeper_user: custom-user
  #   zookeeper_group: custom-group
  hosts:
    ip-172-31-34-246.us-east-2.compute.internal:
      ## By default the first host will get zookeeper id=1, second gets id=2. Set zookeeper_id to customize
      # zookeeper_id: 2
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # zookeeper_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/zookeeper-ip-172-31-34-246.us-east-2.compute.internal.keytab>
      # zookeeper_kerberos_principal: <The principal configured in kdc server, eg. zookeeper/[email protected]>
    ip-172-31-37-15.us-east-2.compute.internal:
      # zookeeper_id: 3
    ip-172-31-34-231.us-east-2.compute.internal:
      # zookeeper_id: 1
kafka_broker:
  ## To apply variables specifically to the hosts within kafka_broker group, you can add a vars block like below
  # vars:
  #   ## To configure Kafka to run as a custom user, uncomment below
  #   kafka_broker_user: custom-user
  #   kafka_broker_group: custom-group
  #   ## To update the log.dirs property within the kafka server.properties, uncomment below
  #   ## By default the log directory is /var/lib/kafka/data
  #   kafka_broker:
  #     datadir:
  #       - /var/lib/kafka/my-data
  hosts:
    ip-172-31-34-246.us-east-2.compute.internal:
      ## By default the first host will get broker id=1, second gets id=2. Set broker_id to customize
      # broker_id: 3
      ## Properties can also be applied on a host by host basis.
      ## In the below example we configure a Multi-Region Clusters by setting the following properties on each host:
      # kafka_broker:
      #   properties:
      #     broker.rack: us-east-1d
      #     replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # kafka_broker_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/ip-172-31-34-246.us-east-2.compute.internal>
      # kafka_broker_kerberos_principal: <The principal configured in kdc server, eg. kafka/[email protected]>
    ip-172-31-37-15.us-east-2.compute.internal:
      # broker_id: 2
      # kafka_broker:
      #   properties:
      #     broker.rack: us-east-1a
      #     replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
    ip-172-31-34-231.us-east-2.compute.internal:
      # broker_id: 1
      # kafka_broker:
      #   properties:
      #     broker.rack: us-east-1b
      #     replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
schema_registry:
  ## To configure Schema Registry to run as a custom user, uncomment below
  # vars:
  #   schema_registry_user: custom-user
  #   schema_registry_group: custom-group
  hosts:
    ip-172-31-34-246.us-east-2.compute.internal:
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # schema_registry_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/schemaregistry-ip-172-31-34-246.us-east-2.compute.internal.keytab>
      # schema_registry_kerberos_principal: <The principal configured in kdc server ex: schemaregistry/[email protected]>
kafka_rest:
  ## To configure Rest Proxy to run as a custom user, uncomment below
  # vars:
  #   kafka_rest_user: custom-user
  #   kafka_rest_group: custom-group
  hosts:
    ip-172-31-34-246.us-east-2.compute.internal:
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # kafka_rest_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/restproxy-ip-172-31-34-246.us-east-2.compute.internal.keytab>
      # kafka_rest_kerberos_principal: <The principal configured in kdc server ex: restproxy/[email protected]>
ksql:
  ## To configure KSQL to run as a custom user, uncomment below
  # vars:
  #   ksql_user: custom-user
  #   ksql_group: custom-group
  hosts:
    ip-172-31-37-15.us-east-2.compute.internal:
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # ksql_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/ksql-ip-172-31-37-15.us-east-2.compute.internal.keytab>
      # ksql_kerberos_principal: <The principal configured in kdc server ex: ksql/[email protected]>

#### To configure multiple ksql clusters, make use of child groups and follow the example below
## Note: There can only be one ksql group, so comment out above section, if configuring multiple ksql clusters
## Decide on a name for each ksql cluster (that is not 'ksql') and use that as ansible group name, this is how the cluster will be named in c3
# ksql1:
#   vars:
#     ## The below is a mandatory variable that must be unique for each ksql cluster.
#     ## The service id should end in an underscore by convention
#     ksql_service_id: ksql1_
#   hosts:
#     ip-172-31-34-15.us-east-2.compute.internal:
#     ip-172-31-37-16.us-east-2.compute.internal:
#
# ksql2:
#   vars:
#     ksql_service_id: ksql2_
#   hosts:
#     ip-172-31-34-17.us-east-2.compute.internal:
#     ip-172-31-37-18.us-east-2.compute.internal:
#
# ksql:
#   children:
#     ksql1:
#     ksql2:
kafka_connect:
  ## To configure Connect to run as a custom user, uncomment below
  # vars:
  #   kafka_connect_user: custom-user
  #   kafka_connect_group: custom-group
  #
  #   #### Connectors and the Confluent Hub
  #
  #   ## Adding Connector Paths.
  #   ## NOTE: This variable is mapped to the `plugin.path` Kafka Connect property.
  #   kafka_connect_plugins_path:
  #     - /usr/share/java
  #     - /my/connectors/dir
  #
  #   ## Installing Connectors From Confluent Hub
  #   kafka_connect_confluent_hub_plugins:
  #     - jcustenborder/kafka-connect-spooldir:2.0.43
  #
  #   ## Installing Connectors from Archive files local to Ansible host
  #   kafka_connect_plugins:
  #     - local/path/to/connect_archive.zip
  #
  #   ## Installing Connectors from Archive files in remote server (ie Nexus)
  #   kafka_connect_plugins_remote:
  #     - http://myhost.com/connect_archive.zip
  hosts:
    ip-172-31-34-246.us-east-2.compute.internal:
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # kafka_connect_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/connect-ip-172-31-34-246.us-east-2.compute.internal.keytab>
      # kafka_connect_kerberos_principal: <The principal configured in kdc server ex: connect/[email protected]>

#### To configure multiple connect clusters, make use of child groups and follow the example below
## Note: There can only be one kafka_connect group, so comment out above section, if configuring multiple connect clusters
## Decide on a name for each connect cluster (that is not 'kafka_connect') and use that as ansible group name
# syslog:
#   vars:
#     ## Decide on a group id for each connect cluster. This is a mandatory variable, and must be unique for each cluster
#     ## The group id will be the name of the connect cluster within c3
#     kafka_connect_group_id: connect_syslog
#   hosts:
#     ip-172-31-34-246.us-east-2.compute.internal:
#
# elastic:
#   vars:
#     kafka_connect_group_id: connect-elastic
#   hosts:
#     ip-172-31-34-246.us-east-2.compute.internal:
#
# kafka_connect:
#   children:
#     syslog:
#     elastic:
control_center:
  ## To configure Control Center to run as a custom user, uncomment below
  # vars:
  #   control_center_user: custom-user
  #   control_center_group: custom-group
  hosts:
    ip-172-31-37-15.us-east-2.compute.internal:
      ## For kerberos sasl protocol, EACH host will need these two variables:
      # control_center_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/controlcenter-ip-172-31-37-15.us-east-2.compute.internal.keytab>
      # control_center_kerberos_principal: <The principal configured in kdc server ex: controlcenter/[email protected]>
  ## If you are configuring multiple connect or ksql clusters, the below variables are mandatory.
  ## The group names must match the group names as they are in your inventory
  # vars:
  #   ksql_cluster_ansible_group_names:
  #     - ksql1
  #     - ksql2
  #   kafka_connect_cluster_ansible_group_names:
  #     - syslog
  #     - elastic