From 7537eadcdfa5450f3627da304de25be281ee46f5 Mon Sep 17 00:00:00 2001 From: cicharka <93913624+cicharka@users.noreply.github.com> Date: Mon, 28 Mar 2022 12:45:49 +0200 Subject: [PATCH] Kafka upgrade (#2871) (#2803) (#2990) Kafka: * upgrade to 2.8.1 version * move upgrade tasks to Kafka role * refactor installation and upgrade tasks * refactor: rename 'kafka_var' setting Zookeeper: * refactor installation and upgrade * move upgrade tasks to Zookeeper role Kafka-exporter: * verify if service definition needs to be updated even if exporter itself was not updated * remove `kafka.version` from configuration --- ansible/playbooks/kafka.yml | 4 +- .../playbooks/roles/kafka/defaults/main.yml | 6 +- .../roles/kafka/files/jmx-kafka-config.yml | 2 +- .../playbooks/roles/kafka/handlers/main.yml | 14 +- .../common/download_and_unpack_binary.yml | 22 ++ .../roles/kafka/tasks/{ => common}/start.yml | 4 +- .../roles/kafka/tasks/common/stop.yml | 5 + .../kafka/tasks/generate-certificates.yml | 111 ++++---- ansible/playbooks/roles/kafka/tasks/main.yml | 6 +- .../playbooks/roles/kafka/tasks/metrics.yml | 97 +++---- .../roles/kafka/tasks/setup-kafka.yml | 136 +++++----- ansible/playbooks/roles/kafka/tasks/stop.yml | 4 - .../kafka/tasks/upgrade/install-upgrade.yml | 34 +++ .../tasks/upgrade/main.yml} | 31 +-- .../kafka/tasks/upgrade/preflight-check.yml | 14 + .../tasks/upgrade}/set-updated-version.yml | 10 +- .../tasks/upgrade}/update-properties.yml | 8 +- .../roles/kafka/tasks/verify-kafka.yml | 2 +- .../kafka/templates/client-ssl.properties.j2 | 4 +- .../roles/kafka/templates/kafka.service.j2 | 10 +- .../templates/kafka_producer_consumer.py.j2 | 6 +- .../kafka/templates/kafka_server_jaas.conf.j2 | 6 +- .../roles/kafka/templates/logrotate.conf.j2 | 2 +- .../kafka/templates/server.properties.j2 | 86 +++---- .../kafka_exporter/tasks/upgrade/main.yml | 10 + .../upgrade/verify-service-definition.yml | 46 ++++ .../templates/kafka-exporter.service.j2 | 2 +- .../requirements/x86_64/files.yml | 4 +- .../upgrade/tasks/kafka/install-upgrade.yml | 55 ---- .../upgrade/tasks/kafka/preflight-check.yml | 11 - .../tasks/zookeeper/install-upgrade.yml | 79 ------ .../tasks/zookeeper/preflight-check.yml | 14 - .../roles/zookeeper/defaults/main.yml | 1 + .../roles/zookeeper/handlers/main.yml | 2 +- .../common/download_and_unpack_binary.yml | 30 +++ .../playbooks/roles/zookeeper/tasks/main.yml | 88 ++----- .../roles/zookeeper/tasks/metrics.yml | 4 +- .../tasks/upgrade/install-upgrade.yml | 53 ++++ .../tasks/upgrade/main.yml} | 12 +- .../tasks/upgrade/preflight-check.yml | 14 + ansible/playbooks/upgrade.yml | 14 +- ansible/playbooks/zookeeper.yml | 4 +- cli/src/ansible/AnsibleVarsGenerator.py | 2 +- docs/changelogs/CHANGELOG-2.0.md | 2 + docs/home/COMPONENTS.md | 2 +- docs/home/howto/RETENTION.md | 13 +- docs/home/howto/UPGRADE.md | 1 + .../defaults/configuration/kafka-exporter.yml | 1 - .../common/defaults/configuration/kafka.yml | 131 +++++----- .../common/validation/configuration/kafka.yml | 239 +++++++++--------- 50 files changed, 742 insertions(+), 716 deletions(-) create mode 100644 ansible/playbooks/roles/kafka/tasks/common/download_and_unpack_binary.yml rename ansible/playbooks/roles/kafka/tasks/{ => common}/start.yml (86%) create mode 100644 ansible/playbooks/roles/kafka/tasks/common/stop.yml delete mode 100644 ansible/playbooks/roles/kafka/tasks/stop.yml create mode 100644 ansible/playbooks/roles/kafka/tasks/upgrade/install-upgrade.yml rename ansible/playbooks/roles/{upgrade/tasks/kafka.yml => 
kafka/tasks/upgrade/main.yml} (51%) create mode 100644 ansible/playbooks/roles/kafka/tasks/upgrade/preflight-check.yml rename ansible/playbooks/roles/{upgrade/tasks/kafka => kafka/tasks/upgrade}/set-updated-version.yml (63%) rename ansible/playbooks/roles/{upgrade/tasks/kafka => kafka/tasks/upgrade}/update-properties.yml (83%) create mode 100644 ansible/playbooks/roles/kafka_exporter/tasks/upgrade/verify-service-definition.yml delete mode 100644 ansible/playbooks/roles/upgrade/tasks/kafka/install-upgrade.yml delete mode 100644 ansible/playbooks/roles/upgrade/tasks/kafka/preflight-check.yml delete mode 100644 ansible/playbooks/roles/upgrade/tasks/zookeeper/install-upgrade.yml delete mode 100644 ansible/playbooks/roles/upgrade/tasks/zookeeper/preflight-check.yml create mode 100644 ansible/playbooks/roles/zookeeper/tasks/common/download_and_unpack_binary.yml create mode 100644 ansible/playbooks/roles/zookeeper/tasks/upgrade/install-upgrade.yml rename ansible/playbooks/roles/{upgrade/tasks/zookeeper.yml => zookeeper/tasks/upgrade/main.yml} (73%) create mode 100644 ansible/playbooks/roles/zookeeper/tasks/upgrade/preflight-check.yml diff --git a/ansible/playbooks/kafka.yml b/ansible/playbooks/kafka.yml index 85609a5aa1..78d3762f30 100644 --- a/ansible/playbooks/kafka.yml +++ b/ansible/playbooks/kafka.yml @@ -2,8 +2,8 @@ # Ansible playbook that makes sure the base items for all nodes are installed - hosts: all - gather_facts: yes - tasks: [ ] + gather_facts: true + tasks: [] - hosts: kafka become: true diff --git a/ansible/playbooks/roles/kafka/defaults/main.yml b/ansible/playbooks/roles/kafka/defaults/main.yml index 0abffa7915..d20bad6641 100644 --- a/ansible/playbooks/roles/kafka/defaults/main.yml +++ b/ansible/playbooks/roles/kafka/defaults/main.yml @@ -1,4 +1,6 @@ -kafka_version: 2.6.0 +kafka_version: 2.8.1 scala_version: 2.12 -kafka_bin_filename: "kafka_2.12-2.6.0.tgz" +kafka_bin_filename: "kafka_2.12-2.8.1.tgz" +kafka_install_dir: "/opt/kafka_{{ scala_version }}-{{ kafka_version }}" + prometheus_jmx_exporter_path: /opt/jmx-exporter/jmx_prometheus_javaagent.jar diff --git a/ansible/playbooks/roles/kafka/files/jmx-kafka-config.yml b/ansible/playbooks/roles/kafka/files/jmx-kafka-config.yml index 6a394fd80a..21b32cb214 100644 --- a/ansible/playbooks/roles/kafka/files/jmx-kafka-config.yml +++ b/ansible/playbooks/roles/kafka/files/jmx-kafka-config.yml @@ -85,4 +85,4 @@ rules: name: kafka_$1_$2_$3 type: GAUGE labels: - quantile: "0.$4" \ No newline at end of file + quantile: "0.$4" diff --git a/ansible/playbooks/roles/kafka/handlers/main.yml b/ansible/playbooks/roles/kafka/handlers/main.yml index b7668bd059..36fbfe6c82 100644 --- a/ansible/playbooks/roles/kafka/handlers/main.yml +++ b/ansible/playbooks/roles/kafka/handlers/main.yml @@ -1,20 +1,22 @@ --- # Handlers for Kafka -- name: restart kafka +- name: Restart kafka service: name: kafka state: restarted - enabled: yes + enabled: true retries: 10 delay: 10 -- name: restart prometheus +- name: Restart prometheus become: true systemd: daemon_reload: true name: prometheus state: restarted - delegate_to: "{{ item }}" - with_inventory_hostnames: - - prometheus + delegate_to: "{{ node }}" + loop_control: + loop_var: node + loop: "{{ groups.prometheus }}" + when: groups.prometheus is defined diff --git a/ansible/playbooks/roles/kafka/tasks/common/download_and_unpack_binary.yml b/ansible/playbooks/roles/kafka/tasks/common/download_and_unpack_binary.yml new file mode 100644 index 0000000000..7596f736bf --- /dev/null +++ 
b/ansible/playbooks/roles/kafka/tasks/common/download_and_unpack_binary.yml @@ -0,0 +1,22 @@ +--- +- name: Download Kafka binaries + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ kafka_bin_filename }}" + +- name: Uncompress the Kafka tar + unarchive: + remote_src: true + creates: "{{ kafka_install_dir }}" + src: "{{ download_directory }}/{{ kafka_bin_filename }}" + dest: /opt + +- name: Change ownership on Kafka directory + file: + path: "{{ kafka_install_dir }}" + state: directory + mode: u=rwx,go=rx + owner: "{{ specification.user }}" + group: "{{ specification.group }}" diff --git a/ansible/playbooks/roles/kafka/tasks/start.yml b/ansible/playbooks/roles/kafka/tasks/common/start.yml similarity index 86% rename from ansible/playbooks/roles/kafka/tasks/start.yml rename to ansible/playbooks/roles/kafka/tasks/common/start.yml index 47a87e4181..1f033ccfdd 100644 --- a/ansible/playbooks/roles/kafka/tasks/start.yml +++ b/ansible/playbooks/roles/kafka/tasks/common/start.yml @@ -1,10 +1,10 @@ --- - - name: Enable and Start Kafka service: name: kafka state: started - enabled: yes + enabled: true + daemon-reload: true # - name: wait for kafka port # wait_for: host={{kafka.listen_address| default('localhost')}} port={{kafka.port}} state=started timeout={{ kafka.wait_for_period }} diff --git a/ansible/playbooks/roles/kafka/tasks/common/stop.yml b/ansible/playbooks/roles/kafka/tasks/common/stop.yml new file mode 100644 index 0000000000..492ed55ac1 --- /dev/null +++ b/ansible/playbooks/roles/kafka/tasks/common/stop.yml @@ -0,0 +1,5 @@ +--- +- name: Stop Kafka + systemd: + name: kafka + state: stopped diff --git a/ansible/playbooks/roles/kafka/tasks/generate-certificates.yml b/ansible/playbooks/roles/kafka/tasks/generate-certificates.yml index 18ef907cd2..ed6e216943 100644 --- a/ansible/playbooks/roles/kafka/tasks/generate-certificates.yml +++ b/ansible/playbooks/roles/kafka/tasks/generate-certificates.yml @@ -1,40 +1,46 @@ - name: Create stores directory file: - path: "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}" + path: "{{ specification.security.ssl.server.keystore_location | dirname }}" state: directory - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" - mode: "0755" + owner: "{{ specification.user }}" + group: "{{ specification.group }}" + mode: u=rwx,go=rx - name: Check if keystore exists on broker stat: - path: "{{ specification.kafka_var.security.ssl.server.keystore_location }}" + path: "{{ specification.security.ssl.server.keystore_location }}" + get_attributes: false + get_checksum: false + get_mime: false changed_when: false register: keystore_exists - name: Generate keystore for each server - shell: keytool -keystore {{ specification.kafka_var.security.ssl.server.keystore_location }} \ - -alias localhost -validity {{ specification.kafka_var.security.ssl.server.cert_validity }} -genkey -keyalg RSA \ - -noprompt -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} \ - -keypass {{ specification.kafka_var.security.ssl.server.passwords.key }} \ - -dname "CN={{ inventory_hostname }}" -ext SAN="DNS:{{ inventory_hostname }}" + command: keytool -keystore {{ specification.security.ssl.server.keystore_location }} \ + -alias localhost -validity {{ specification.security.ssl.server.cert_validity }} -genkey -keyalg RSA \ + -noprompt -storepass {{ specification.security.ssl.server.passwords.keystore }} \ + -keypass {{ specification.security.ssl.server.passwords.key 
}} \ -dname "CN={{ inventory_hostname }}" -ext SAN="DNS:{{ inventory_hostname }}" when: - not keystore_exists.stat.exists - name: Check if signing certificate exists stat: - path: "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-cert" + path: "{{ specification.security.ssl.server.keystore_location | dirname }}/ca-cert" + get_attributes: false + get_checksum: false + get_mime: false register: signing_certificate_exists changed_when: false when: - groups['kafka'][0] == inventory_hostname - name: Generate signing certificate - shell: openssl req -new -x509 -keyout {{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-key \ - -out {{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-cert \ - -days {{ specification.kafka_var.security.ssl.server.cert_validity }} \ - -subj "/CN={{ inventory_hostname }}" \ - --passout pass:{{ specification.kafka_var.security.ssl.server.passwords.key }} + command: openssl req -new -x509 -keyout {{ specification.security.ssl.server.keystore_location | dirname }}/ca-key \ + -out {{ specification.security.ssl.server.keystore_location | dirname }}/ca-cert \ + -days {{ specification.security.ssl.server.cert_validity }} \ + -subj "/CN={{ inventory_hostname }}" \ + --passout pass:{{ specification.security.ssl.server.passwords.key }} when: - groups['kafka'][0] == inventory_hostname - not signing_certificate_exists.stat.exists @@ -42,15 +48,16 @@ - name: Create kafka certificates directory on Epiphany host become: false file: - path: "{{ specification.kafka_var.security.ssl.server.local_cert_download_path }}" + path: "{{ specification.security.ssl.server.local_cert_download_path }}" state: directory + mode: u=rwx,go= delegate_to: localhost - name: Fetching files fetch: - src: "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/{{ item }}" - dest: "{{ specification.kafka_var.security.ssl.server.local_cert_download_path }}/{{ item }}" - flat: yes + src: "{{ specification.security.ssl.server.keystore_location | dirname }}/{{ item }}" + dest: "{{ specification.security.ssl.server.local_cert_download_path }}/{{ item }}" + flat: true loop: - "ca-cert" - "ca-key" @@ -59,8 +66,9 @@ - name: Copy signing certificate and key to brokers copy: - src: "{{ specification.kafka_var.security.ssl.server.local_cert_download_path }}/{{ item }}" - dest: "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/" + src: "{{ specification.security.ssl.server.local_cert_download_path }}/{{ item }}" + dest: "{{ specification.security.ssl.server.keystore_location | dirname }}/" + mode: preserve loop: - "ca-cert" - "ca-key" @@ -69,20 +77,23 @@ - name: Check if trustore exists stat: - path: "{{ specification.kafka_var.security.ssl.server.truststore_location }}" + path: "{{ specification.security.ssl.server.truststore_location }}" + get_attributes: false + get_checksum: false + get_mime: false register: trustore_exists - name: Create trustore - shell: keytool -noprompt -keystore "{{ specification.kafka_var.security.ssl.server.truststore_location }}" -alias CARoot \ - -import -file "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-cert" \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} \ - -keypass {{ specification.kafka_var.security.ssl.server.passwords.key }} + command: keytool -noprompt -keystore "{{ specification.security.ssl.server.truststore_location }}" -alias CARoot \ + -import -file
"{{ specification.security.ssl.server.keystore_location | dirname }}/ca-cert" \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} \ + -keypass {{ specification.security.ssl.server.passwords.key }} when: - not trustore_exists.stat.exists - name: Check if CA certificate is already imported - shell: keytool -list -v -keystore {{ specification.kafka_var.security.ssl.server.keystore_location }} \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} \ + shell: set -o pipefail && keytool -list -v -keystore {{ specification.security.ssl.server.keystore_location }} \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} \ | grep -i "Alias name" | grep -i "caroot" failed_when: "caroot_exists.rc == 2" changed_when: false @@ -90,8 +101,8 @@ - name: Check if certificate signed by CA is already imported shell: |- - keytool -list -v -keystore {{ specification.kafka_var.security.ssl.server.keystore_location }} \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} \ + set -o pipefail && keytool -list -v -keystore {{ specification.security.ssl.server.keystore_location }} \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} \ -alias localhost \ | grep -i 'Certificate chain length: 2' failed_when: "signed_cert_exists.rc == 2" @@ -99,41 +110,41 @@ register: signed_cert_exists - name: Export certificate to sign certificate with CA - shell: keytool -noprompt -keystore {{ specification.kafka_var.security.ssl.server.keystore_location }} \ - -alias localhost -certreq \ - -file "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/cert-file" \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} \ - -keypass {{ specification.kafka_var.security.ssl.server.passwords.key }} + command: keytool -noprompt -keystore {{ specification.security.ssl.server.keystore_location }} \ + -alias localhost -certreq \ + -file "{{ specification.security.ssl.server.keystore_location | dirname }}/cert-file" \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} \ + -keypass {{ specification.security.ssl.server.passwords.key }} when: - signed_cert_exists.rc == 1 - name: Signing certificate with CA - shell: openssl x509 -req -CA "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-cert" \ - -CAkey "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-key" \ - -in "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/cert-file" \ - -out "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/cert-signed" \ - -days {{ specification.kafka_var.security.ssl.server.cert_validity }} -CAcreateserial \ - -passin pass:{{ specification.kafka_var.security.ssl.server.passwords.key }} + command: openssl x509 -req -CA "{{ specification.security.ssl.server.keystore_location | dirname }}/ca-cert" \ + -CAkey "{{ specification.security.ssl.server.keystore_location | dirname }}/ca-key" \ + -in "{{ specification.security.ssl.server.keystore_location | dirname }}/cert-file" \ + -out "{{ specification.security.ssl.server.keystore_location | dirname }}/cert-signed" \ + -days {{ specification.security.ssl.server.cert_validity }} -CAcreateserial \ + -passin pass:{{ specification.security.ssl.server.passwords.key }} when: - signed_cert_exists.rc == 1 - name: Import certificate CA - shell: keytool -noprompt -keystore {{ 
specification.kafka_var.security.ssl.server.keystore_location }} -alias CARoot \ - -import -file "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/ca-cert" \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} + command: keytool -noprompt -keystore {{ specification.security.ssl.server.keystore_location }} -alias CARoot \ + -import -file "{{ specification.security.ssl.server.keystore_location | dirname }}/ca-cert" \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} when: - caroot_exists.rc == 1 - name: Import certificate signed by CA - shell: keytool -noprompt -keystore {{ specification.kafka_var.security.ssl.server.keystore_location }} -alias localhost \ - -import -file "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/cert-signed" \ - -storepass {{ specification.kafka_var.security.ssl.server.passwords.keystore }} + command: keytool -noprompt -keystore {{ specification.security.ssl.server.keystore_location }} -alias localhost \ + -import -file "{{ specification.security.ssl.server.keystore_location | dirname }}/cert-signed" \ + -storepass {{ specification.security.ssl.server.passwords.keystore }} when: - signed_cert_exists.rc == 1 - name: Remove extracted key and cert from others than root node file: - path: "{{ specification.kafka_var.security.ssl.server.keystore_location | dirname }}/{{ item }}" + path: "{{ specification.security.ssl.server.keystore_location | dirname }}/{{ item }}" state: absent loop: - "ca-cert" diff --git a/ansible/playbooks/roles/kafka/tasks/main.yml b/ansible/playbooks/roles/kafka/tasks/main.yml index 30df870093..1c29043b55 100644 --- a/ansible/playbooks/roles/kafka/tasks/main.yml +++ b/ansible/playbooks/roles/kafka/tasks/main.yml @@ -3,6 +3,9 @@ - name: Check if jmx exporter is available stat: + get_attributes: false + get_checksum: false + get_mime: false path: "{{ prometheus_jmx_exporter_path }}" register: exporter @@ -13,5 +16,4 @@ - include_tasks: metrics.yml when: exporter.stat.exists -- include_tasks: start.yml - +- include_tasks: common/start.yml diff --git a/ansible/playbooks/roles/kafka/tasks/metrics.yml b/ansible/playbooks/roles/kafka/tasks/metrics.yml index 519d2476ff..4c33a5ba76 100644 --- a/ansible/playbooks/roles/kafka/tasks/metrics.yml +++ b/ansible/playbooks/roles/kafka/tasks/metrics.yml @@ -2,58 +2,65 @@ - name: prometheus jmx | add kafka user to correct jmx exporter user user: - name: "{{ specification.kafka_var.user }}" + name: "{{ specification.user }}" groups: "{{ specification.jmx_exporter_group }}" - append: yes + append: true - name: prometheus jmx | configuration file - become: yes + become: true copy: dest: "{{ specification.prometheus_jmx_config }}" src: jmx-kafka-config.yml - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" - mode: 0644 + owner: "{{ specification.user }}" + group: "{{ specification.group }}" + mode: u=rx,go=r -- name: delegated | create prometheus system group - group: - name: prometheus - system: true - state: present - delegate_to: "{{ item }}" - with_inventory_hostnames: - - prometheus +- name: Configure metrics on prometheus machines + when: groups.prometheus is defined + block: + - name: delegated | create prometheus system group + group: + name: prometheus + system: true + state: present + delegate_to: "{{ node }}" + loop_control: + loop_var: node + loop: "{{ groups.prometheus }}" -- name: delegated | create prometheus system user - user: - name: prometheus - 
system: true - shell: "/usr/sbin/nologin" - group: prometheus - createhome: false - delegate_to: "{{ item }}" - with_inventory_hostnames: - - prometheus + - name: delegated | create prometheus system user + user: + name: prometheus + system: true + shell: "/usr/sbin/nologin" + group: prometheus + createhome: false + delegate_to: "{{ node }}" + loop_control: + loop_var: node + loop: "{{ groups.prometheus }}" -- name: delegated | create file_sd for service discovery configs - file: - dest: "{{ specification.prometheus_config_dir }}/file_sd" - state: directory - owner: root - group: prometheus - mode: 0750 - delegate_to: "{{ item }}" - with_inventory_hostnames: - - prometheus + - name: delegated | create file_sd for service discovery configs + file: + dest: "{{ specification.prometheus_config_dir }}/file_sd" + state: directory + owner: root + group: prometheus + mode: u=rwx,g=rx,o= + delegate_to: "{{ node }}" + loop_control: + loop_var: node + loop: "{{ groups.prometheus }}" -- name: delegated | copy file_sd_config to prometheus hosts - template: - dest: "{{ specification.prometheus_config_dir }}/file_sd/kafka-jmx-{{ inventory_hostname }}.yml" - src: file_sd_config.yml.j2 - owner: root - group: root - mode: 0644 - delegate_to: "{{ item }}" - notify: restart prometheus - with_inventory_hostnames: - - prometheus + - name: delegated | copy file_sd_config to prometheus hosts + template: + dest: "{{ specification.prometheus_config_dir }}/file_sd/kafka-jmx-{{ inventory_hostname }}.yml" + src: file_sd_config.yml.j2 + owner: root + group: root + mode: u=rx,go=r + notify: Restart prometheus + delegate_to: "{{ node }}" + loop_control: + loop_var: node + loop: "{{ groups.prometheus }}" diff --git a/ansible/playbooks/roles/kafka/tasks/setup-kafka.yml b/ansible/playbooks/roles/kafka/tasks/setup-kafka.yml index 7b793c8191..52d5371b0e 100644 --- a/ansible/playbooks/roles/kafka/tasks/setup-kafka.yml +++ b/ansible/playbooks/roles/kafka/tasks/setup-kafka.yml @@ -1,14 +1,14 @@ --- - name: Setup group group: - name: "{{ specification.kafka_var.group }}" - system: yes + name: "{{ specification.group }}" + system: true - name: Setup user user: - name: "{{ specification.kafka_var.user }}" - system: yes - group: "{{ specification.kafka_var.group }}" + name: "{{ specification.user }}" + system: true + group: "{{ specification.group }}" shell: "/usr/sbin/nologin" - name: Install Java package @@ -22,91 +22,65 @@ RedHat: - java-1.8.0-openjdk-headless module_defaults: - yum: { lock_timeout: "{{ yum_lock_timeout }}" } + yum: + lock_timeout: "{{ yum_lock_timeout }}" -- name: Set Kafka file name to install - set_fact: - kafka_file_name: "{{ kafka_bin_filename }}" - -- name: Download Kafka binaries - include_role: - name: download - tasks_from: download_file - vars: - file_name: "{{ kafka_file_name }}" +- name: Download and unpack Kafka's binary + include_tasks: common/download_and_unpack_binary.yml - name: Add Kafka's bin dir to the PATH copy: content: "export PATH=$PATH:/opt/kafka/bin" dest: "/etc/profile.d/kafka_path.sh" - mode: 0755 - -- name: Check for Kafka package - stat: - path: /opt/kafka_{{ scala_version }}-{{ kafka_version }}/bin/kafka-server-start.sh - register: kafka_package - -- name: Uncompress the Kafka tar - unarchive: - remote_src: yes - creates: /opt/kafka_{{ scala_version }}-{{ kafka_version }} - src: "{{ download_directory }}/{{ kafka_file_name }}" - dest: /opt - when: not kafka_package.stat.exists - -- name: Change ownership on kafka directory. 
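# The "Check for Kafka package" / "Uncompress the Kafka tar" / "Change ownership on kafka directory"
# sequence removed in this hunk is now handled in one place by the new
# common/download_and_unpack_binary.yml added earlier in this patch; unarchive's `creates:` option
# already skips extraction when {{ kafka_install_dir }} exists, so the separate stat check is no
# longer needed. A minimal sketch of the shared include as both the install and upgrade paths now
# call it (illustrative only, names as used in this patch):
- name: Download and unpack Kafka's binary
  include_tasks: common/download_and_unpack_binary.yml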
- file: - path: /opt/kafka_{{ scala_version }}-{{ kafka_version }} - state: directory - owner: kafka - group: kafka + mode: u=rwx,go=rx - name: Link /opt/kafka to the right version file: - dest: /opt/kafka + path: /opt/kafka state: link - src: /opt/kafka_{{ scala_version }}-{{ kafka_version }} + src: "{{ kafka_install_dir }}" - name: Create systemd config template: dest: /etc/systemd/system/kafka.service owner: root group: root - mode: 0644 + mode: u=rw,go=r src: kafka.service.j2 notify: - - restart kafka + - Restart kafka - name: Reload daemon - command: systemctl daemon-reload + systemd: + daemon-reload: true - name: Create data_dir file: - path: "{{ specification.kafka_var.data_dir }}" + path: "{{ specification.data_dir }}" state: directory - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" - mode: 0755 + owner: "{{ specification.user }}" + group: "{{ specification.group }}" + mode: u=rwx,go=rx - name: Remove lost+found in the datadir file: - path: "{{ specification.kafka_var.data_dir }}/lost+found" + path: "{{ specification.data_dir }}/lost+found" state: absent - name: Create log_dir file: - path: "{{ specification.kafka_var.log_dir }}" + path: "{{ specification.log_dir }}" state: directory - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" - mode: 0755 + owner: "{{ specification.user }}" + group: "{{ specification.group }}" + mode: u=rwx,go=rx -- name: Create /etc/kafka directory +- name: Create /etc/kafka directory # noqa risky-file-permissions file: path: /etc/kafka state: directory - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" + owner: "{{ specification.user }}" + group: "{{ specification.group }}" # - name: link conf_dir to /opt/kafka/config # file: dest=/etc/kafka owner=kafka group=kafka state=link src=/opt/kafka/config @@ -114,34 +88,34 @@ # Setup log4j.properties - name: Create log4j.properties file: - dest: "{{ specification.kafka_var.conf_dir }}/log4j.properties" - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" - mode: 0644 + path: "{{ specification.conf_dir }}/log4j.properties" + owner: "{{ specification.user }}" + group: "{{ specification.group }}" + mode: u=rw,go=r - name: Generate certificate include_tasks: generate-certificates.yml when: - - specification.kafka_var.security.ssl.enabled is defined - - specification.kafka_var.security.ssl.enabled + - specification.security.ssl.enabled is defined + - specification.security.ssl.enabled # Setup server.properties - name: Create server.properties template: - dest: "{{ specification.kafka_var.conf_dir }}/server.properties" - owner: "{{ specification.kafka_var.user }}" - group: "{{ specification.kafka_var.group }}" + dest: "{{ specification.conf_dir }}/server.properties" + owner: "{{ specification.user }}" + group: "{{ specification.group }}" # Was 0640 - mode: 0644 + mode: u=rw,go=r src: server.properties.j2 register: create_server_properties notify: - - restart kafka + - Restart kafka - name: Delete meta.properties become: true file: - path: "{{ specification.kafka_var.data_dir }}/meta.properties" + path: "{{ specification.data_dir }}/meta.properties" state: absent when: create_server_properties.changed @@ -150,23 +124,35 @@ dest: /etc/logrotate.d/kafka owner: root group: root - mode: 0644 + mode: u=rw,go=r src: logrotate.conf.j2 - name: configure system settings, file descriptors and number of threads for kafka pam_limits: - domain: "{{ 
specification.kafka_var.user }}" + domain: "{{ specification.user }}" limit_type: "{{ item.limit_type }}" limit_item: "{{ item.limit_item }}" - value: "{{item.value}}" - with_items: - - { limit_type: '-', limit_item: 'nofile', value: 128000 } - - { limit_type: '-', limit_item: 'nproc', value: 128000 } - - { limit_type: 'soft', limit_item: 'memlock', value: unlimited } - - { limit_type: 'hard', limit_item: 'memlock', value: unlimited } + value: "{{ item.value }}" + loop: + - + limit_type: '-' + limit_item: 'nofile' + value: 128000 + - + limit_type: '-' + limit_item: 'nproc' + value: 128000 + - + limit_type: 'soft' + limit_item: 'memlock' + value: unlimited + - + limit_type: 'hard' + limit_item: 'memlock' + value: unlimited - name: reload settings from all system configuration files - shell: sysctl --system + command: sysctl --system # SASL Setup # - name: copy SASL config file diff --git a/ansible/playbooks/roles/kafka/tasks/stop.yml b/ansible/playbooks/roles/kafka/tasks/stop.yml deleted file mode 100644 index d8416fb302..0000000000 --- a/ansible/playbooks/roles/kafka/tasks/stop.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -- name: Stop Kafka - service: name=kafka state=stopped diff --git a/ansible/playbooks/roles/kafka/tasks/upgrade/install-upgrade.yml b/ansible/playbooks/roles/kafka/tasks/upgrade/install-upgrade.yml new file mode 100644 index 0000000000..6e2dc9201f --- /dev/null +++ b/ansible/playbooks/roles/kafka/tasks/upgrade/install-upgrade.yml @@ -0,0 +1,34 @@ +--- +- name: Download and unpack Kafka's binary + include_tasks: common/download_and_unpack_binary.yml + +- name: Copy configuration from previous version + copy: + remote_src: true + src: /opt/kafka/config/ + dest: "{{ kafka_install_dir }}/config" + mode: preserve + +- name: Link /opt/kafka to recently installed version + file: + dest: /opt/kafka + state: link + src: "{{ kafka_install_dir }}" + force: true + +- name: Remove previous version binaries + file: + path: /opt/kafka_{{ scala_version }}-{{ old_kafka_version }} + state: absent + +- name: Get log.dirs property + shell: >- + set -o pipefail && + grep log.dirs /opt/kafka/config/server.properties | awk -F'=' '{print $2}' + register: log_dirs + changed_when: false + +- name: Remove lost+found directory from log.dirs + file: + path: "{{ log_dirs.stdout }}/lost+found" + state: absent diff --git a/ansible/playbooks/roles/upgrade/tasks/kafka.yml b/ansible/playbooks/roles/kafka/tasks/upgrade/main.yml similarity index 51% rename from ansible/playbooks/roles/upgrade/tasks/kafka.yml rename to ansible/playbooks/roles/kafka/tasks/upgrade/main.yml index 68798a0953..b7245a8d4f 100644 --- a/ansible/playbooks/roles/upgrade/tasks/kafka.yml +++ b/ansible/playbooks/roles/kafka/tasks/upgrade/main.yml @@ -1,11 +1,6 @@ --- -- name: Include defaults from kafka role - include_vars: - file: roles/kafka/defaults/main.yml - name: kafka_defaults - - name: Include pre-flight checks - include_tasks: kafka/preflight-check.yml + include_tasks: upgrade/preflight-check.yml - name: Get installed Kafka version shell: >- @@ -13,17 +8,16 @@ /opt/kafka/bin/kafka-server-start.sh --version | grep Commit | grep -oP '^\d+\.\d+\.\d+' register: result -- name: Set common facts +- name: Set old_kafka_version fact set_fact: - kafka_version: - old: "{{ result.stdout }}" - new: "{{ kafka_defaults.kafka_version }}" - scala_version: "{{ kafka_defaults.scala_version }}" - kafka_bin_filename: "{{ kafka_defaults.kafka_bin_filename }}" + old_kafka_version: "{{ result.stdout }}" - name: Check for upgrade flag file stat: path: "{{ 
lock_file }}" + get_attributes: false + get_checksum: false + get_mime: false register: lock_file_status - name: Include upgrade tasks @@ -31,23 +25,22 @@ - name: Create upgrade flag file file: path: "{{ lock_file }}" + mode: u=rw,g=r,o= state: touch - name: Stop Kafka service - service: - name: kafka - state: stopped + include_tasks: common/stop.yml - name: Include update Kafka properties tasks - include_tasks: kafka/update-properties.yml + include_tasks: upgrade/update-properties.yml - name: Include Kafka upgrade tasks - include_tasks: kafka/install-upgrade.yml + include_tasks: upgrade/install-upgrade.yml - name: Include set Kafka version tasks - include_tasks: kafka/set-updated-version.yml + include_tasks: upgrade/set-updated-version.yml when: - - lock_file_status.stat.exists or kafka_version.old is version( kafka_version.new, '<' ) + - lock_file_status.stat.exists or old_kafka_version is version( kafka_version, '<' ) - name: Remove Kafka upgrade flag file file: diff --git a/ansible/playbooks/roles/kafka/tasks/upgrade/preflight-check.yml b/ansible/playbooks/roles/kafka/tasks/upgrade/preflight-check.yml new file mode 100644 index 0000000000..97aa26ac87 --- /dev/null +++ b/ansible/playbooks/roles/kafka/tasks/upgrade/preflight-check.yml @@ -0,0 +1,14 @@ +--- +- name: Check if Kafka is installed in default location + stat: + path: /opt/kafka/bin/kafka-server-start.sh + get_attributes: false + get_checksum: false + get_mime: false + register: kafka_exec_file + +- name: Assert Kafka location + assert: + that: + - kafka_exec_file.stat.exists + fail_msg: Kafka not found in /opt/kafka (Epiphany default) - check your configuration diff --git a/ansible/playbooks/roles/upgrade/tasks/kafka/set-updated-version.yml b/ansible/playbooks/roles/kafka/tasks/upgrade/set-updated-version.yml similarity index 63% rename from ansible/playbooks/roles/upgrade/tasks/kafka/set-updated-version.yml rename to ansible/playbooks/roles/kafka/tasks/upgrade/set-updated-version.yml index 0eda1ed76c..4058df7a55 100644 --- a/ansible/playbooks/roles/upgrade/tasks/kafka/set-updated-version.yml +++ b/ansible/playbooks/roles/kafka/tasks/upgrade/set-updated-version.yml @@ -1,16 +1,16 @@ --- - name: Check if server.properties file exists stat: + get_attributes: false + get_checksum: false + get_mime: false path: /opt/kafka/config/server.properties - name: Modify inter.broker.protocol.version property lineinfile: path: /opt/kafka/config/server.properties regexp: "^inter.broker.protocol.version" - line: "inter.broker.protocol.version={{ kafka_version.new }}" + line: "inter.broker.protocol.version={{ kafka_version }}" - name: Start kafka service - systemd: - name: kafka - state: started - daemon-reload: yes + include_tasks: common/start.yml diff --git a/ansible/playbooks/roles/upgrade/tasks/kafka/update-properties.yml b/ansible/playbooks/roles/kafka/tasks/upgrade/update-properties.yml similarity index 83% rename from ansible/playbooks/roles/upgrade/tasks/kafka/update-properties.yml rename to ansible/playbooks/roles/kafka/tasks/upgrade/update-properties.yml index 7d4b1923e9..584f96f9ba 100644 --- a/ansible/playbooks/roles/upgrade/tasks/kafka/update-properties.yml +++ b/ansible/playbooks/roles/kafka/tasks/upgrade/update-properties.yml @@ -6,12 +6,12 @@ register: current_kafka_version_property failed_when: - result.rc == 2 - changed_when: False + changed_when: false - name: Add current_kafka_version property lineinfile: path: /opt/kafka/config/server.properties - line: "CURRENT_KAFKA_VERSION={{ kafka_version.old }}" + line: 
"CURRENT_KAFKA_VERSION={{ old_kafka_version }}" when: current_kafka_version_property.stdout == "" - name: Check if inter.broker.protocol.version property is defined @@ -21,10 +21,10 @@ register: inter_broker_protocol_version_property failed_when: - result.rc == 2 - changed_when: False + changed_when: false - name: Add inter.broker.protocol.version property lineinfile: path: /opt/kafka/config/server.properties - line: "inter.broker.protocol.version={{ kafka_version.old }}" + line: "inter.broker.protocol.version={{ old_kafka_version }}" when: inter_broker_protocol_version_property.stdout == "" diff --git a/ansible/playbooks/roles/kafka/tasks/verify-kafka.yml b/ansible/playbooks/roles/kafka/tasks/verify-kafka.yml index dde93e7e33..6d45957bed 100644 --- a/ansible/playbooks/roles/kafka/tasks/verify-kafka.yml +++ b/ansible/playbooks/roles/kafka/tasks/verify-kafka.yml @@ -10,5 +10,5 @@ dest: "/home/{{ admin_user.name }}/kafka_producer_consumer.py" owner: "{{ admin_user.name }}" group: "{{ admin_user.name }}" - mode: 0755 + mode: u=rwx,go=rx src: kafka_producer_consumer.py.j2 diff --git a/ansible/playbooks/roles/kafka/templates/client-ssl.properties.j2 b/ansible/playbooks/roles/kafka/templates/client-ssl.properties.j2 index 81b69ab3d6..c4807c9863 100644 --- a/ansible/playbooks/roles/kafka/templates/client-ssl.properties.j2 +++ b/ansible/playbooks/roles/kafka/templates/client-ssl.properties.j2 @@ -1,4 +1,4 @@ -bootstrap.servers={{ kafka_hosts }}:{{ specification.kafka_var.security.ssl.port }} +bootstrap.servers={{ kafka_hosts }}:{{ specification.security.ssl.port }} security.protocol=SSL ssl.truststore.location=/var/private/ssl/kafka.client.truststore.jks -ssl.truststore.password={{ specification.kafka_var.security.ssl.client.passwords.truststore }} +ssl.truststore.password={{ specification.security.ssl.client.passwords.truststore }} diff --git a/ansible/playbooks/roles/kafka/templates/kafka.service.j2 b/ansible/playbooks/roles/kafka/templates/kafka.service.j2 index 18c69243ee..71d6815140 100644 --- a/ansible/playbooks/roles/kafka/templates/kafka.service.j2 +++ b/ansible/playbooks/roles/kafka/templates/kafka.service.j2 @@ -2,8 +2,8 @@ Description=Kafka Daemon After=zookeeper.service -{% if specification.kafka_var.javax_net_debug is defined %} -{% set javax_debug = '-Djavax.net.debug=' ~ specification.kafka_var.javax_net_debug %} +{% if specification.javax_net_debug is defined %} +{% set javax_debug = '-Djavax.net.debug=' ~ specification.javax_net_debug %} {% else %} {% set javax_debug = '' %} {% endif %} @@ -14,14 +14,14 @@ User=kafka Group=kafka LimitNOFILE=32768 Restart=on-failure -Environment="KAFKA_HEAP_OPTS={{ specification.kafka_var.heap_opts }}" -Environment="LOG_DIR={{ specification.kafka_var.log_dir }}" +Environment="KAFKA_HEAP_OPTS={{ specification.heap_opts }}" +Environment="LOG_DIR={{ specification.log_dir }}" {% if exporter.stat.exists %} Environment="KAFKA_OPTS={{ javax_debug }} -javaagent:{{ prometheus_jmx_exporter_path }}={{ specification.prometheus_jmx_exporter_web_listen_port }}:{{ specification.prometheus_jmx_config }}" {% else %} Environment="KAFKA_OPTS={{ javax_debug }}" {% endif %} -Environment="KAFKA_JMX_OPTS={{ specification.kafka_var.jmx_opts }}" +Environment="KAFKA_JMX_OPTS={{ specification.jmx_opts }}" ExecStart=/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties [Install] diff --git a/ansible/playbooks/roles/kafka/templates/kafka_producer_consumer.py.j2 b/ansible/playbooks/roles/kafka/templates/kafka_producer_consumer.py.j2 index 
0f798a4480..b3bf8e47a3 100644 --- a/ansible/playbooks/roles/kafka/templates/kafka_producer_consumer.py.j2 +++ b/ansible/playbooks/roles/kafka/templates/kafka_producer_consumer.py.j2 @@ -20,8 +20,8 @@ class Producer(threading.Thread): producer = KafkaProducer(bootstrap_servers='{{ kafka_hosts }}') while not self.stop_event.is_set(): - {% for msg in specification.kafka_var.tests.epiphany_topic_test_msgs %} - producer.send('{{ specification.kafka_var.tests.epiphany_topic_test }}', b"{{ msg }}") + {% for msg in specification.tests.epiphany_topic_test_msgs %} + producer.send('{{ specification.tests.epiphany_topic_test }}', b"{{ msg }}") {% endfor %} time.sleep(1) @@ -40,7 +40,7 @@ class Consumer(multiprocessing.Process): consumer = KafkaConsumer(bootstrap_servers='{{ kafka_hosts }}', auto_offset_reset='earliest', consumer_timeout_ms=1000) - consumer.subscribe(['{{ specification.kafka_var.tests.epiphany_topic_test }}']) + consumer.subscribe(['{{ specification.tests.epiphany_topic_test }}']) while not self.stop_event.is_set(): for message in consumer: diff --git a/ansible/playbooks/roles/kafka/templates/kafka_server_jaas.conf.j2 b/ansible/playbooks/roles/kafka/templates/kafka_server_jaas.conf.j2 index ca801c1b8b..176c77ffe7 100644 --- a/ansible/playbooks/roles/kafka/templates/kafka_server_jaas.conf.j2 +++ b/ansible/playbooks/roles/kafka/templates/kafka_server_jaas.conf.j2 @@ -1,8 +1,8 @@ KafkaServer { org.apache.kafka.common.security.plain.PlainLoginModule required - username="{{ specification.kafka_var.admin }}" - password="{{ specification.kafka_var.admin_pwd }}" - user_admin="{{ specification.kafka_var.admin_pwd }}" + username="{{ specification.admin }}" + password="{{ specification.admin_pwd }}" + user_admin="{{ specification.admin_pwd }}" {%- for host in kafka_hosts %} user_{{host}}="kafkabroker1-secret"; {%- endfor %} diff --git a/ansible/playbooks/roles/kafka/templates/logrotate.conf.j2 b/ansible/playbooks/roles/kafka/templates/logrotate.conf.j2 index 3c0d37d799..ad2b60e6d1 100644 --- a/ansible/playbooks/roles/kafka/templates/logrotate.conf.j2 +++ b/ansible/playbooks/roles/kafka/templates/logrotate.conf.j2 @@ -1,4 +1,4 @@ -{{ specification.kafka_var.log_dir }}/*.log { +{{ specification.log_dir }}/*.log { rotate 5 daily compress diff --git a/ansible/playbooks/roles/kafka/templates/server.properties.j2 b/ansible/playbooks/roles/kafka/templates/server.properties.j2 index 6f2643beac..63ec2cd81b 100644 --- a/ansible/playbooks/roles/kafka/templates/server.properties.j2 +++ b/ansible/playbooks/roles/kafka/templates/server.properties.j2 @@ -11,23 +11,23 @@ auto.create.topics.enable=true delete.topic.enable=true #default replication factors for automatically created topics -default.replication.factor={{ specification.kafka_var.default_replication_factor }} +default.replication.factor={{ specification.default_replication_factor }} #The replication factor for the offsets topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement. 
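# Every change in this template is the same mechanical rename: values previously read from
# specification.kafka_var.* are now read directly from specification.*. The same flattening
# applies to the user-facing configuration doc (schema/common/defaults/configuration/kafka.yml
# in this patch). A sketch of the before/after shape; the keys are taken from this template,
# the values are illustrative only:
#
#   before:
#     specification:
#       kafka_var:
#         partitions: 8
#         log_retention_hours: 168
#         security:
#           ssl:
#             enabled: false
#
#   after:
#     specification:
#       partitions: 8
#       log_retention_hours: 168
#       security:
#         ssl:
#           enabled: false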
-offsets.topic.replication.factor={{ specification.kafka_var.offsets_topic_replication_factor }} +offsets.topic.replication.factor={{ specification.offsets_topic_replication_factor }} #Offsets older than this retention period will be discarded -offsets.retention.minutes={{ specification.kafka_var.offset_retention_minutes }} +offsets.retention.minutes={{ specification.offset_retention_minutes }} #The maximum number of incremental fetch sessions that we will maintain -max.incremental.fetch.session.cache.slots={{ specification.kafka_var.max_incremental_fetch_session_cache_slots }} +max.incremental.fetch.session.cache.slots={{ specification.max_incremental_fetch_session_cache_slots }} #Enable controlled shutdown of the server -controlled.shutdown.enable={{ specification.kafka_var.controlled_shutdown_enable | lower }} +controlled.shutdown.enable={{ specification.controlled_shutdown_enable | lower }} #Number of fetcher threads used to replicate messages from a source broker. #Increasing this value can increase the degree of I/O parallelism in the follower broker. -num.replica.fetchers={{ specification.kafka_var.num_replica_fetchers }} +num.replica.fetchers={{ specification.num_replica_fetchers }} #The number of bytes of messages to attempt to fetch for each partition. #This is not an absolute maximum, if the first record batch in the first @@ -35,10 +35,10 @@ num.replica.fetchers={{ specification.kafka_var.num_replica_fetchers }} #the record batch will still be returned to ensure that progress can be made. #The maximum record batch size accepted by the broker is defined #via message.max.bytes (broker config) or max.message.bytes (topic config). -replica.fetch.max.bytes={{ specification.kafka_var.replica_fetch_max_bytes }} +replica.fetch.max.bytes={{ specification.replica_fetch_max_bytes }} #The socket receive buffer for network requests -replica.socket.receive.buffer.bytes={{ specification.kafka_var.replica_socket_receive_buffer_bytes }} +replica.socket.receive.buffer.bytes={{ specification.replica_socket_receive_buffer_bytes }} ############################# Socket Server Settings ############################# {%- endif %} @@ -50,10 +50,10 @@ replica.socket.receive.buffer.bytes={{ specification.kafka_var.replica_socket_re # listeners = listener_name://host_name:port # EXAMPLE: # listeners = PLAINTEXT://your.host.name:9092 -{% if specification.kafka_var.security.ssl.enabled -%} -listeners=SSL://{{ inventory_hostname }}:{{ specification.kafka_var.security.ssl.port }} +{% if specification.security.ssl.enabled -%} +listeners=SSL://{{ inventory_hostname }}:{{ specification.security.ssl.port }} {% else %} -listeners=PLAINTEXT://{{ ansible_default_ipv4.address }}:{{ specification.kafka_var.port }} +listeners=PLAINTEXT://{{ ansible_default_ipv4.address }}:{{ specification.port }} {%- endif %} # Hostname and port the broker will advertise to producers and consumers. 
If not set, @@ -65,47 +65,47 @@ listeners=PLAINTEXT://{{ ansible_default_ipv4.address }}:{{ specification.kafka_ #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL # The number of threads handling network requests -num.network.threads={{ specification.kafka_var.socket_settings.network_threads }} +num.network.threads={{ specification.socket_settings.network_threads }} # The number of threads doing disk I/O -num.io.threads={{ specification.kafka_var.socket_settings.io_threads }} +num.io.threads={{ specification.socket_settings.io_threads }} # The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes={{ specification.kafka_var.socket_settings.send_buffer_bytes }} +socket.send.buffer.bytes={{ specification.socket_settings.send_buffer_bytes }} # The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes={{ specification.kafka_var.socket_settings.receive_buffer_bytes }} +socket.receive.buffer.bytes={{ specification.socket_settings.receive_buffer_bytes }} # The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes={{ specification.kafka_var.socket_settings.request_max_bytes }} +socket.request.max.bytes={{ specification.socket_settings.request_max_bytes }} ############################# Security ######################################### # Security protocol used to communicate between brokers -{% if specification.kafka_var.security.ssl.enabled -%} +{% if specification.security.ssl.enabled -%} # If not enabled it should default to PLAINTEXT -security.inter.broker.protocol={{ specification.kafka_var.security.inter_broker_protocol }} +security.inter.broker.protocol={{ specification.security.inter_broker_protocol }} #### Encryption Settings #### -ssl.endpoint.identification.algorithm={{ specification.kafka_var.security.ssl.endpoint_identification_algorithm }} +ssl.endpoint.identification.algorithm={{ specification.security.ssl.endpoint_identification_algorithm }} -ssl.keystore.location={{ specification.kafka_var.security.ssl.server.keystore_location }} -ssl.keystore.password={{ specification.kafka_var.security.ssl.server.passwords.keystore }} -ssl.truststore.location={{ specification.kafka_var.security.ssl.server.truststore_location }} -ssl.truststore.password={{ specification.kafka_var.security.ssl.server.passwords.truststore }} -ssl.key.password={{ specification.kafka_var.security.ssl.server.passwords.key }} +ssl.keystore.location={{ specification.security.ssl.server.keystore_location }} +ssl.keystore.password={{ specification.security.ssl.server.passwords.keystore }} +ssl.truststore.location={{ specification.security.ssl.server.truststore_location }} +ssl.truststore.password={{ specification.security.ssl.server.passwords.truststore }} +ssl.key.password={{ specification.security.ssl.server.passwords.key }} -ssl.client.auth={{ specification.kafka_var.security.ssl.client_auth }} +ssl.client.auth={{ specification.security.ssl.client_auth }} {%- endif %} -{% if specification.kafka_var.security.authentication.enabled %} -{% if specification.kafka_var.security.authentication.authentication_method == "sasl" -%} +{% if specification.security.authentication.enabled %} +{% if specification.security.authentication.authentication_method == "sasl" -%} #### Authentication Settings #### # SASL mechanism used for inter-broker communication. 
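# With the SSL listener above and the client-ssl.properties template shown earlier in this patch,
# connectivity can be verified in the same command-task style this role already uses. This is a
# sketch only; the client config path and the topic-listing call are assumptions, not part of
# this patch:
- name: Verify the SSL listener accepts client connections
  command: >-
    /opt/kafka/bin/kafka-topics.sh --list
    --bootstrap-server {{ inventory_hostname }}:{{ specification.security.ssl.port }}
    --command-config /opt/kafka/config/client-ssl.properties
  changed_when: false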
-sasl.mechanism.inter.broker.protocol={{ specification.kafka_var.security.authentication.sasl_mechanism_inter_broker_protocol }} +sasl.mechanism.inter.broker.protocol={{ specification.security.authentication.sasl_mechanism_inter_broker_protocol }} -sasl.enabled.mechanisms={{ specification.kafka_var.security.sasl_authentication.enabled_mechanisms }} +sasl.enabled.mechanisms={{ specification.security.sasl_authentication.enabled_mechanisms }} # The list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism # for which a security provider is available. Only GSSAPI is enabled by default. @@ -113,23 +113,23 @@ sasl.enabled.mechanisms={{ specification.kafka_var.security.sasl_authentication. ############################# ACLs ############################################# # The authorizer class that should be used for authorization -authorizer.class.name={{ specification.kafka_var.security.authorization.authorizer_class_name }} +authorizer.class.name={{ specification.security.authorization.authorizer_class_name }} # If a Resource R has no associated ACLs, no one other than super users is allowed to access R. If you want to change that behavior, set this property to true -allow.everyone.if.no.acl.found={{ specification.kafka_var.security.authorization.allow_everyone_if_no_acl_found }} +allow.everyone.if.no.acl.found={{ specification.security.authorization.allow_everyone_if_no_acl_found }} -{% if specification.kafka_var.security.authentication.enabled and specification.kafka_var.security.authorization.enabled -%} +{% if specification.security.authentication.enabled and specification.security.authorization.enabled -%} -{% if specification.kafka_var.security.authentication.authentication_method == "certificates" -%} +{% if specification.security.authentication.authentication_method == "certificates" -%} {% set super_users = groups['kafka'] %} -{% if specification.kafka_var.security.authorization.super_users is defined -%} -{% set super_users = super_users + specification.kafka_var.security.authorization.super_users %} +{% if specification.security.authorization.super_users is defined -%} +{% set super_users = super_users + specification.security.authorization.super_users %} {%- endif %} super.users=User:CN={{ super_users | list | join(';User:CN=') }}; {%- endif %} -{% if specification.kafka_var.security.authentication.authentication_method == "sasl" and specification.kafka_var.security.authorization.super_users is defined -%} -super.users={{ specification.kafka_var.security.authorization.super_users }} +{% if specification.security.authentication.authentication_method == "sasl" and specification.security.authorization.super_users is defined -%} +super.users={{ specification.security.authorization.super_users }} {%- endif %} {%- endif %} @@ -138,20 +138,20 @@ super.users={{ specification.kafka_var.security.authorization.super_users }} ############################# Log Basics ############################# # A comma seperated list of directories under which to store log files -log.dirs={{ specification.kafka_var.data_dir }} +log.dirs={{ specification.data_dir }} # The default number of log partitions per topic. More partitions allow greater # parallelism for consumption, but this will also result in more files across # the brokers. -num.partitions={{ specification.kafka_var.partitions }} +num.partitions={{ specification.partitions }} # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. 
# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir={{ specification.kafka_var.num_recovery_threads_per_data_dir }} +num.recovery.threads.per.data.dir={{ specification.num_recovery_threads_per_data_dir }} # When a producer sets acks to "all" (or "-1"), this configuration specifies the minimum number # of replicas that must acknowledge a write for the write to be considered successful. -min.insync.replicas={{ specification.kafka_var.min_insync_replicas }} +min.insync.replicas={{ specification.min_insync_replicas }} ############################# Log Flush Policy ############################# @@ -178,11 +178,11 @@ min.insync.replicas={{ specification.kafka_var.min_insync_replicas }} # from the end of the log. # The minimum age of a log file to be eligible for deletion due to age -log.retention.hours={{ specification.kafka_var.log_retention_hours }} +log.retention.hours={{ specification.log_retention_hours }} # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours. -log.retention.bytes={{ specification.kafka_var.log_retention_bytes }} +log.retention.bytes={{ specification.log_retention_bytes }} # The maximum size of a log segment file. When this size is reached a new log segment will be created. log.segment.bytes=1073741824 diff --git a/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/main.yml b/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/main.yml index b309f8fa55..d262b42f5c 100644 --- a/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/main.yml +++ b/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/main.yml @@ -101,3 +101,13 @@ file: path: "{{ lock_file }}" state: absent + + - name: Set kafka_exporter_upgraded flag + set_fact: + kafka_exporter_upgraded: True + +# Between versions v1.3 and v2.0 there is no kafka-exporter version upgrade, but there is kafka upgrade +# For this reason kafka-exporter service definition needs to be updated +- name: Verify if kafka-exporter service definition needs to be updated + include_tasks: upgrade/verify-service-definition.yml + when: kafka_exporter_upgraded is not defined diff --git a/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/verify-service-definition.yml b/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/verify-service-definition.yml new file mode 100644 index 0000000000..27ee46e2c1 --- /dev/null +++ b/ansible/playbooks/roles/kafka_exporter/tasks/upgrade/verify-service-definition.yml @@ -0,0 +1,46 @@ +--- +- name: Get installed Kafka version + shell: >- + set -o pipefail && + /opt/kafka/bin/kafka-server-start.sh --version | grep Commit | grep -oP '^\d+\.\d+\.\d+' + register: result + +- name: Set current_kafka_version fact + set_fact: + current_kafka_version: "{{ result.stdout }}" + +- name: Fetch kafka-exporter.service file from the remote + slurp: + src: /etc/systemd/system/kafka-exporter.service + register: kafka_exporter_service_definition + +- name: Parse kafka-exporter.service content + set_fact: + _exporter_service_definition_content: "{{ kafka_exporter_service_definition['content'] | b64decode | from_ini }}" + +- name: Get kafka version used in service definition + set_fact: + kafka_version_in_definition: "{{ _kafka_version_in_definition[0].split('=')[-1] }}" + vars: + _kafka_version_in_definition: + "{{ _exporter_service_definition_content['Service']['execstart'].split(' ') | select('search', 
'kafka.version') }}" + +- name: Update service definition + when: current_kafka_version is not version( kafka_version_in_definition, '==') + block: + - name: Update service + template: + src: kafka-exporter.service.j2 + dest: /etc/systemd/system/kafka-exporter.service + owner: root + group: root + mode: u=rw,go=r + + - name: Restart kafka-exporter service + service: + name: kafka-exporter + state: restarted + + - name: Reload systemd daemons + systemd: + daemon_reload: true diff --git a/ansible/playbooks/roles/kafka_exporter/templates/kafka-exporter.service.j2 b/ansible/playbooks/roles/kafka_exporter/templates/kafka-exporter.service.j2 index 9ed7ebbe63..b961102ce8 100644 --- a/ansible/playbooks/roles/kafka_exporter/templates/kafka-exporter.service.j2 +++ b/ansible/playbooks/roles/kafka_exporter/templates/kafka-exporter.service.j2 @@ -7,7 +7,7 @@ After=kafka.service User=kafka_exporter Group=kafka_exporter ExecStartPre=/bin/bash -c '(while ! ss -H -t -l -n sport = :9092 | grep -q "^LISTEN.*:9092"; do echo "Waiting for Kafka Broker port to be listening..."; sleep 2; done)' -ExecStart=/opt/kafka_exporter/kafka_exporter {{ kafka_instances }} {% for flag in specification.config_flags %} {{ flag }} {% endfor %} +ExecStart=/opt/kafka_exporter/kafka_exporter {{ kafka_instances }} {% for flag in specification.config_flags %} {{ flag }} {% endfor %} --kafka.version=2.8.1 SyslogIdentifier=kafka_exporter Restart=always diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml index 27734b80ec..cf1c6844dc 100644 --- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml @@ -14,8 +14,8 @@ files: sha256: 1d1a008c5e29673b404a9ce119b7516fa59974aeda2f47d4a0446d102abce8a1 # --- Misc --- - 'https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz': - sha256: 086bf9ca1fcbe2abe5c62e73d6f172adb1ee5a5b42732e153fb4d4ec82dab69f + 'https://archive.apache.org/dist/kafka/2.8.1/kafka_2.12-2.8.1.tgz': + sha256: 175a4134efc569a586d58916cd16ce70f868b13dea2b5a3d12a67b1395d59f98 'https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz': sha256: c35ed6786d59b73920243f1a324d24c2ddfafb379041d7a350cc9a341c52caf3 diff --git a/ansible/playbooks/roles/upgrade/tasks/kafka/install-upgrade.yml b/ansible/playbooks/roles/upgrade/tasks/kafka/install-upgrade.yml deleted file mode 100644 index eb2448e81d..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/kafka/install-upgrade.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: Set Kafka installation file name as fact - set_fact: - kafka_installation_file_name: "kafka_{{ scala_version }}-{{ kafka_version.new }}.tgz" - -- name: Download Kafka binaries - include_role: - name: download - tasks_from: download_file - vars: - file_name: "{{ kafka_installation_file_name }}" - -- name: Uncompress Kafka installation file - unarchive: - remote_src: yes - src: "{{ download_directory }}/{{ kafka_installation_file_name }}" - dest: /opt - -- name: Change ownership on kafka directory - file: - path: /opt/kafka_{{ scala_version }}-{{ kafka_version.new }} - state: directory - owner: kafka - group: kafka - -- name: Copy configuration from previous version - copy: - remote_src: yes - src: /opt/kafka/config/ - dest: /opt/kafka_{{ scala_version }}-{{ kafka_version.new }}/config - mode: 
preserve - -- name: Link /opt/kafka to recently installed version - file: - dest: /opt/kafka - state: link - src: /opt/kafka_{{ scala_version }}-{{ kafka_version.new }} - force: yes - -- name: Remove previous version binaries - file: - path: /opt/kafka_{{ scala_version }}-{{ kafka_version.old }} - state: absent - -- name: Get log.dirs property - shell: >- - set -o pipefail && - grep log.dirs /opt/kafka/config/server.properties | awk -F'=' '{print $2}' - register: log_dirs - changed_when: False - -- name: Remove lost+found directory from log.dirs - file: - path: "{{ log_dirs.stdout }}/lost+found" - state: absent diff --git a/ansible/playbooks/roles/upgrade/tasks/kafka/preflight-check.yml b/ansible/playbooks/roles/upgrade/tasks/kafka/preflight-check.yml deleted file mode 100644 index ec4283ddb9..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/kafka/preflight-check.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - - name: Check if Kafka is installed in default location - stat: - path: /opt/kafka/bin/kafka-server-start.sh - register: kafka_exec_file - - - name: Assert Kafka location - assert: - that: - - kafka_exec_file.stat.exists - fail_msg: Kafka not found in /opt/kafka (Epiphany default) - check your configuration \ No newline at end of file diff --git a/ansible/playbooks/roles/upgrade/tasks/zookeeper/install-upgrade.yml b/ansible/playbooks/roles/upgrade/tasks/zookeeper/install-upgrade.yml deleted file mode 100644 index 7f43d185af..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/zookeeper/install-upgrade.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- - -# Some tasks in this file are the same as in zookeeper role. It should be refactored (with splitting code into separate files) in order to reuse common tasks here. -- name: Download Zookeeper binaries - include_role: - name: download - tasks_from: download_file - vars: - file_name: "{{ zookeeper_defaults.zookeeper_bin_filename }}" - -- name: Create Zookeeper directories - become: yes - file: - path: "/opt/zookeeper-{{ zookeeper_defaults.zookeeper_version }}" - recurse: yes - owner: "{{ zookeeper_defaults.zookeeper_user }}" - group: "{{ zookeeper_defaults.zookeeper_group }}" - mode: u=rwx,g=rx,o=rx - state: directory - -- name: Uncompress Zookeeper installation file - unarchive: - remote_src: yes - src: "{{ download_directory }}/{{ zookeeper_defaults.zookeeper_bin_filename }}" - dest: /opt/zookeeper-{{ zookeeper_defaults.zookeeper_version }} - creates: "/opt/zookeeper-{{ zookeeper_defaults.zookeeper_version }}/bin" - extra_opts: [--strip-components=1] - mode: u=rwx,g=rx,o=rx - owner: "{{ zookeeper_defaults.zookeeper_user }}" - group: "{{ zookeeper_defaults.zookeeper_group }}" - -- name: Copy configuration from previous version - copy: - remote_src: yes - src: /opt/zookeeper/conf/ - dest: /opt/zookeeper-{{ zookeeper_defaults.zookeeper_version }}/conf - mode: preserve - -- name: Link /opt/zookeeper to recently installed version - file: - dest: /opt/zookeeper - state: link - src: /opt/zookeeper-{{ zookeeper_defaults.zookeeper_version }} - force: yes - -- name: Reconfigure Zookeeper service to use symbolic link - lineinfile: - path: /lib/systemd/system/zookeeper.service - state: present - regexp: '^ExecStart=/opt/zookeeper-.*' - line: "ExecStart=/opt/zookeeper/bin/zkServer.sh start-foreground" - -- name: Check if any snapshots exists in data dir what is necessary in order to run zookeeper after upgrade - find: - paths: "{{ zookeeper_defaults.zookeeper_data_dir }}/version-2" - patterns: "snapshot.*" - register: snapshot_exists - -# From 3.5.5 
version, ZooKeeper is not able to start when no snapshot files present, what is valid scenario in 3.4.X version. Empty snapshot downloaded from Zookeeper's Jira ticket. -- name: Copy empty snapshot if not exists - copy: - dest: "{{ zookeeper_defaults.zookeeper_data_dir }}/version-2" - src: roles/zookeeper/files/snapshot.0 - mode: u=rw,g=r,o=r - owner: "{{ zookeeper_defaults.zookeeper_user }}" - group: "{{ zookeeper_defaults.zookeeper_group }}" - when: snapshot_exists.matched == 0 - -- name: Start Zookeeper service - systemd: - name: zookeeper - state: started - daemon-reload: yes - -- name: Remove previous version binaries - file: - path: /opt/zookeeper-{{ before_upgrade_zookeeper_version }} - state: absent - when: before_upgrade_zookeeper_version != zookeeper_defaults.zookeeper_version diff --git a/ansible/playbooks/roles/upgrade/tasks/zookeeper/preflight-check.yml b/ansible/playbooks/roles/upgrade/tasks/zookeeper/preflight-check.yml deleted file mode 100644 index b5c9cb6cc6..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/zookeeper/preflight-check.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - - name: Check if Zookeeper is installed in default location - stat: - path: /opt/zookeeper/bin/zkServer.sh - get_attributes: false - get_checksum: false - get_mime: false - register: zookeeper_exec_file - - - name: Assert Zookeeper location - assert: - that: - - zookeeper_exec_file.stat.exists - fail_msg: Zookeeper not found in /opt/zookeeper (Epiphany default) - check your configuration diff --git a/ansible/playbooks/roles/zookeeper/defaults/main.yml b/ansible/playbooks/roles/zookeeper/defaults/main.yml index d74ce924bb..bb7d4fe902 100644 --- a/ansible/playbooks/roles/zookeeper/defaults/main.yml +++ b/ansible/playbooks/roles/zookeeper/defaults/main.yml @@ -5,6 +5,7 @@ zookeeper_bin_filename: "apache-zookeeper-3.5.8-bin.tar.gz" zookeeper_hosts: "{{ groups['zookeeper'] }}" +zookeeper_install_dir: "/opt/zookeeper-{{ zookeeper_version }}" zookeeper_data_dir: /var/lib/zookeeper zookeeper_log_dir: /var/log/zookeeper diff --git a/ansible/playbooks/roles/zookeeper/handlers/main.yml b/ansible/playbooks/roles/zookeeper/handlers/main.yml index 1dd6e5db57..f4c3fd1480 100644 --- a/ansible/playbooks/roles/zookeeper/handlers/main.yml +++ b/ansible/playbooks/roles/zookeeper/handlers/main.yml @@ -16,4 +16,4 @@ state: restarted delegate_to: "{{ item }}" with_inventory_hostnames: - - prometheus \ No newline at end of file + - prometheus diff --git a/ansible/playbooks/roles/zookeeper/tasks/common/download_and_unpack_binary.yml b/ansible/playbooks/roles/zookeeper/tasks/common/download_and_unpack_binary.yml new file mode 100644 index 0000000000..a07982e145 --- /dev/null +++ b/ansible/playbooks/roles/zookeeper/tasks/common/download_and_unpack_binary.yml @@ -0,0 +1,30 @@ +--- +- name: Download Zookeeper binary + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ zookeeper_bin_filename }}" + +- name: Create {{ zookeeper_install_dir }} directory + become: true + file: + path: "{{ zookeeper_install_dir }}" + recurse: true + owner: "{{ zookeeper_user }}" + group: "{{ zookeeper_group }}" + mode: u=rwx,g=rx,o=rx + state: directory + +- name: Unpack Zookeeper-{{ zookeeper_version }} binary + become: true + unarchive: + remote_src: true + src: "{{ download_directory }}/{{ zookeeper_bin_filename }}" + dest: "{{ zookeeper_install_dir }}" + creates: "{{ zookeeper_install_dir }}/bin" + extra_opts: [--strip-components=1] + mode: u=rwx,g=rx,o=rx + owner: "{{ zookeeper_user }}" + group: "{{ 
zookeeper_group }}" + check_mode: false diff --git a/ansible/playbooks/roles/zookeeper/tasks/main.yml b/ansible/playbooks/roles/zookeeper/tasks/main.yml index ba5a093080..6ed5cb8ae9 100644 --- a/ansible/playbooks/roles/zookeeper/tasks/main.yml +++ b/ansible/playbooks/roles/zookeeper/tasks/main.yml @@ -2,13 +2,13 @@ - name: Create Zookeeper group group: name: "{{ zookeeper_group }}" - system: yes + system: true - name: Create Zookeeper user user: name: "{{ zookeeper_user }}" group: "{{ zookeeper_group }}" - system: yes + system: true shell: "/usr/sbin/nologin" - name: Install Java package @@ -22,62 +22,17 @@ RedHat: - java-1.8.0-openjdk-headless module_defaults: - yum: { lock_timeout: "{{ yum_lock_timeout }}" } + yum: + lock_timeout: "{{ yum_lock_timeout }}" -- name: Check if jmx exporter is available - stat: - path: "{{ prometheus_jmx_exporter_path }}" - register: exporter - -- name: Set Zookeeper variable with version name - set_fact: - zookeeper_name: "zookeeper-{{ zookeeper_version }}" - changed_when: false - -- name: Set Zookeeper install dir for {{ zookeeper_name }} - set_fact: - zookeeper_install_dir: "/opt/{{ zookeeper_name }}" - -- name: Set Zookeeper file name to install - set_fact: - zookeeper_file_name: "{{ zookeeper_bin_filename }}" - -- name: Download Zookeeper binaries - include_role: - name: download - tasks_from: download_file - vars: - file_name: "{{ zookeeper_file_name }}" - -- name: Create {{ zookeeper_install_dir }} directories - become: yes - file: - path: "{{ item }}" - recurse: yes - owner: "{{ zookeeper_user }}" - group: "{{ zookeeper_group }}" - mode: u=rwx,g=rx,o= - state: directory - with_items: - - "{{ zookeeper_install_dir }}" - -- name: Unpack {{ zookeeper_name }} binary - become: true - unarchive: - remote_src: yes - src: "{{ download_directory }}/{{ zookeeper_file_name }}" - dest: "{{ zookeeper_install_dir }}" - creates: "{{ zookeeper_install_dir }}/bin" - extra_opts: [--strip-components=1] - mode: u=rwx,g=rx,o=rx - owner: "{{ zookeeper_user }}" - group: "{{ zookeeper_group }}" - check_mode: false +- name: Download and unpack Zookeeper's binary + include_tasks: common/download_and_unpack_binary.yml - name: Create directories file: path: "{{ item }}" state: directory + mode: u=rwx,go=rx owner: "{{ zookeeper_user }}" group: "{{ zookeeper_group }}" with_items: @@ -86,7 +41,15 @@ - "{{ zookeeper_install_dir }}/conf" - "/etc/zookeeper/conf" -- name: Create Zookeeper service +- name: Check if jmx exporter is available + stat: + path: "{{ prometheus_jmx_exporter_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: exporter + +- name: Create Zookeeper service # noqa risky-file-permissions (https://github.com/ansible-community/ansible-lint/pull/1030) template: src: zookeeper.service.j2 dest: /lib/systemd/system/zookeeper.service @@ -101,9 +64,10 @@ dest: /var/lib/zookeeper/myid owner: "{{ zookeeper_user }}" group: "{{ zookeeper_group }}" + mode: preserve notify: Restart zookeeper -- name: Configure Zookeeper +- name: Configure Zookeeper # noqa risky-file-permissions (https://github.com/ansible-community/ansible-lint/pull/1030) template: src: zoo.cfg.j2 dest: "{{ zookeeper_install_dir }}/conf/zoo.cfg" @@ -112,7 +76,10 @@ notify: Restart zookeeper - name: Link /opt/zookeeper to the right version - file: path=/opt/zookeeper state=link src="{{ zookeeper_install_dir }}" + file: + path: /opt/zookeeper + state: link + src: "{{ zookeeper_install_dir }}" - name: Add Zookeeper's bin dir to the PATH copy: @@ -120,23 +87,18 @@ dest: 
"/etc/profile.d/zookeeper_path.sh" mode: u=rwx,g=rx,o=rx -- name: Update the log4j config with saner production values +- name: Update the log4j config with saner production values # noqa risky-file-permissions (https://github.com/ansible-community/ansible-lint/pull/1030) template: src: log4j.properties.j2 dest: "{{ zookeeper_install_dir }}/conf/log4j.properties" notify: - Restart zookeeper -- name: Enable Zookeeper service - service: - name: zookeeper - enabled: yes - -- name: Start Zookeeper +- name: Enable and start Zookeeper service service: name: zookeeper + enabled: true state: started - enabled: yes - include_tasks: metrics.yml when: exporter.stat.exists diff --git a/ansible/playbooks/roles/zookeeper/tasks/metrics.yml b/ansible/playbooks/roles/zookeeper/tasks/metrics.yml index 7e5e838131..99aba036ae 100644 --- a/ansible/playbooks/roles/zookeeper/tasks/metrics.yml +++ b/ansible/playbooks/roles/zookeeper/tasks/metrics.yml @@ -2,10 +2,10 @@ user: name: "{{ zookeeper_user }}" groups: "{{ jmx_exporter_group }}" - append: yes + append: true - name: prometheus jmx | configuration file - become: yes + become: true copy: dest: "{{ prometheus_jmx_config }}" src: jmx-zookeeper-config.yml diff --git a/ansible/playbooks/roles/zookeeper/tasks/upgrade/install-upgrade.yml b/ansible/playbooks/roles/zookeeper/tasks/upgrade/install-upgrade.yml new file mode 100644 index 0000000000..c3fbd77e17 --- /dev/null +++ b/ansible/playbooks/roles/zookeeper/tasks/upgrade/install-upgrade.yml @@ -0,0 +1,53 @@ +--- +- name: Download and unpack Zookeeper's binary + include_tasks: common/download_and_unpack_binary.yml + +- name: Copy configuration from previous version + copy: + remote_src: true + src: /opt/zookeeper/conf/ + dest: "{{ zookeeper_install_dir }}}/conf" + mode: preserve + +- name: Link /opt/zookeeper to recently installed version + file: + path: /opt/zookeeper + state: link + src: "{{ zookeeper_install_dir }}" + force: true + +- name: Reconfigure Zookeeper service to use symbolic link + lineinfile: + path: /lib/systemd/system/zookeeper.service + state: present + regexp: '^ExecStart=/opt/zookeeper-.*' + line: "ExecStart=/opt/zookeeper/bin/zkServer.sh start-foreground" + +- name: Check if any snapshots exists in data dir what is necessary in order to run zookeeper after upgrade + find: + paths: "{{ zookeeper_data_dir }}/version-2" + patterns: "snapshot.*" + register: snapshot_exists + +# From 3.5.5 version, ZooKeeper is not able to start when no snapshot files present, what is valid scenario in 3.4.X version. +# Empty snapshot downloaded from Zookeeper's Jira ticket. 
+- name: Copy empty snapshot if not exists + copy: + dest: "{{ zookeeper_data_dir }}/version-2" + src: roles/zookeeper/files/snapshot.0 + mode: u=rw,g=r,o=r + owner: "{{ zookeeper_user }}" + group: "{{ zookeeper_group }}" + when: snapshot_exists.matched == 0 + +- name: Start Zookeeper service + systemd: + name: zookeeper + state: started + daemon-reload: true + +- name: Remove previous version binaries + file: + path: /opt/zookeeper-{{ before_upgrade_zookeeper_version }} + state: absent + when: before_upgrade_zookeeper_version != zookeeper_version diff --git a/ansible/playbooks/roles/upgrade/tasks/zookeeper.yml b/ansible/playbooks/roles/zookeeper/tasks/upgrade/main.yml similarity index 73% rename from ansible/playbooks/roles/upgrade/tasks/zookeeper.yml rename to ansible/playbooks/roles/zookeeper/tasks/upgrade/main.yml index 8e4778e0b9..18621892d7 100644 --- a/ansible/playbooks/roles/upgrade/tasks/zookeeper.yml +++ b/ansible/playbooks/roles/zookeeper/tasks/upgrade/main.yml @@ -1,11 +1,6 @@ --- -- name: Include defaults from zookeeper role - include_vars: - file: roles/zookeeper/defaults/main.yml - name: zookeeper_defaults - - name: Include pre-flight checks - include_tasks: zookeeper/preflight-check.yml + include_tasks: upgrade/preflight-check.yml - name: Get installed Zookeeper's version stat: @@ -41,9 +36,10 @@ state: stopped - name: Include upgrade Zookeeper task - include_tasks: zookeeper/install-upgrade.yml + include_tasks: upgrade/install-upgrade.yml when: - - lock_file_status.stat.exists or before_upgrade_zookeeper_version is version( zookeeper_defaults.zookeeper_version, '<' ) + - lock_file_status.stat.exists + or before_upgrade_zookeeper_version is version( zookeeper_version, '<' ) - name: Remove Zookeeper upgrade flag file file: diff --git a/ansible/playbooks/roles/zookeeper/tasks/upgrade/preflight-check.yml b/ansible/playbooks/roles/zookeeper/tasks/upgrade/preflight-check.yml new file mode 100644 index 0000000000..b13fb74d86 --- /dev/null +++ b/ansible/playbooks/roles/zookeeper/tasks/upgrade/preflight-check.yml @@ -0,0 +1,14 @@ +--- +- name: Check if Zookeeper is installed in default location + stat: + path: /opt/zookeeper/bin/zkServer.sh + get_attributes: false + get_checksum: false + get_mime: false + register: zookeeper_exec_file + +- name: Assert Zookeeper location + assert: + that: + - zookeeper_exec_file.stat.exists + fail_msg: Zookeeper not found in /opt/zookeeper (Epiphany default) - check your configuration diff --git a/ansible/playbooks/upgrade.yml b/ansible/playbooks/upgrade.yml index 81cb260ea1..50521c275d 100644 --- a/ansible/playbooks/upgrade.yml +++ b/ansible/playbooks/upgrade.yml @@ -218,14 +218,16 @@ tasks_from: upgrade/main when: "'grafana' in upgrade_components or upgrade_components|length == 0" +# === kafka === + - hosts: zookeeper serial: 1 become: true become_method: sudo tasks: - - import_role: - name: upgrade - tasks_from: zookeeper + - include_role: + name: zookeeper + tasks_from: upgrade/main vars: { lock_file: /var/tmp/zookeeper-upgrade-in-progress.flag } when: "'zookeeper' in upgrade_components or upgrade_components|length == 0" @@ -237,9 +239,9 @@ shell: executable: /bin/bash tasks: - - import_role: - name: upgrade - tasks_from: kafka + - include_role: + name: kafka + tasks_from: upgrade/main vars: { lock_file: /var/tmp/kafka-upgrade-in-progress.flag } when: "'kafka' in upgrade_components or upgrade_components|length == 0" diff --git a/ansible/playbooks/zookeeper.yml b/ansible/playbooks/zookeeper.yml index d681feaa53..a8c0d8366f 100644 --- 
a/ansible/playbooks/zookeeper.yml +++ b/ansible/playbooks/zookeeper.yml @@ -2,8 +2,8 @@ # Ansible playbook that makes sure the base items for all nodes are installed - hosts: all - gather_facts: yes - tasks: [ ] + gather_facts: true + tasks: [] - hosts: zookeeper become: true diff --git a/cli/src/ansible/AnsibleVarsGenerator.py b/cli/src/ansible/AnsibleVarsGenerator.py index 7353cd0647..d44fd8086a 100644 --- a/cli/src/ansible/AnsibleVarsGenerator.py +++ b/cli/src/ansible/AnsibleVarsGenerator.py @@ -72,7 +72,7 @@ def generate(self): # is changed between versions (e.g. wal_keep_segments -> wal_keep_size) and sometimes previous parameters # are not compatible with the new ones, defaults are used for template processing roles_with_defaults = [ - 'grafana', 'haproxy', 'image_registry', 'jmx_exporter', 'kafka_exporter', + 'grafana', 'haproxy', 'image_registry', 'jmx_exporter', 'kafka', 'kafka_exporter', 'kibana', 'logging', 'node_exporter', 'postgres_exporter', 'postgresql', 'prometheus', 'rabbitmq', 'repository' ] diff --git a/docs/changelogs/CHANGELOG-2.0.md b/docs/changelogs/CHANGELOG-2.0.md index f797ddeae4..238967c2b1 100644 --- a/docs/changelogs/CHANGELOG-2.0.md +++ b/docs/changelogs/CHANGELOG-2.0.md @@ -40,6 +40,7 @@ - [#3025](https://github.com/epiphany-platform/epiphany/issues/3025) - Running yum commands may hang waiting for user input - [#2728](https://github.com/epiphany-platform/epiphany/issues/2728) - PostgreSQL's configuration files located outside the data directory are not copied by repmgr - [#3029](https://github.com/epiphany-platform/epiphany/issues/3029) - [RHEL] Single machine upgrade fails on preflight check: 'dict object' has no attribute 'size_available' +- [#2803](https://github.com/epiphany-platform/epiphany/issues/2803) - Refactor: rename 'kafka_var' setting ### Updated @@ -55,6 +56,7 @@ - [#2847](https://github.com/epiphany-platform/epiphany/issues/2847) - Upgrade Ansible to 5.2.0 - Ansible 2.10.15 to 5.2.0 - Python 3.7 to 3.10 +- [#2871](https://github.com/epiphany-platform/epiphany/issues/2871) - Upgrade Kafka to 2.8.1 ### Removed diff --git a/docs/home/COMPONENTS.md b/docs/home/COMPONENTS.md index 23f77bb4c0..c255092445 100644 --- a/docs/home/COMPONENTS.md +++ b/docs/home/COMPONENTS.md @@ -14,7 +14,7 @@ Note that versions are default versions and can be changed in certain cases thro | Flannel | 0.14.0 | https://github.com/coreos/flannel/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Canal | 3.20.3 | https://github.com/projectcalico/calico | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Coredns | 1.8.4 | https://github.com/coredns/coredns | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | -| Kafka | 2.6.0 | https://github.com/apache/kafka | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | +| Kafka | 2.8.1 | https://github.com/apache/kafka | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Zookeeper | 3.5.8 | https://github.com/apache/zookeeper | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | RabbitMQ | 3.8.9 | https://github.com/rabbitmq/rabbitmq-server | [Mozilla Public License](https://www.mozilla.org/en-US/MPL/) | | Docker CE | 20.10.8 | https://docs.docker.com/engine/release-notes/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | diff --git a/docs/home/howto/RETENTION.md b/docs/home/howto/RETENTION.md index 3fa6ccdb9c..6ae5b8d87f 100644 --- a/docs/home/howto/RETENTION.md +++ b/docs/home/howto/RETENTION.md @@ 
-22,23 +22,22 @@ kind: configuration/kafka title: "Kafka" name: default specification: - kafka_var: - partitions: 8 - log_retention_hours: 168 - log_retention_bytes: -1 + partitions: 8 + log_retention_hours: 168 + log_retention_bytes: -1 ``` ### Configuration parameters -#### specification.kafka_var.partitions +#### specification.partitions Sets [num.partitions](https://kafka.apache.org/documentation/#brokerconfigs_num.partitions) parameter -#### specification.kafka_var.log_retention_hours +#### specification.log_retention_hours Sets [log.retention.hours](https://kafka.apache.org/documentation/#brokerconfigs_log.retention.bytes) parameter -#### specification.kafka_var.log_retention_bytes +#### specification.log_retention_bytes Sets [log.retention.bytes](https://kafka.apache.org/documentation/#brokerconfigs_log.retention.bytes) parameter diff --git a/docs/home/howto/UPGRADE.md b/docs/home/howto/UPGRADE.md index 0086bbb0b1..04aaaa20da 100644 --- a/docs/home/howto/UPGRADE.md +++ b/docs/home/howto/UPGRADE.md @@ -249,6 +249,7 @@ Kafka will be automatically updated to the latest version supported by Epiphany. version [here](../COMPONENTS.md#epiphany-cluster-components). Kafka brokers are updated one by one - but the update procedure does not guarantee "zero downtime" because it depends on the number of available brokers, topics, and partitioning configuration. +Note that old Kafka binaries are removed during upgrade. ### ZooKeeper upgrade diff --git a/schema/common/defaults/configuration/kafka-exporter.yml b/schema/common/defaults/configuration/kafka-exporter.yml index 334bc124e4..720b9c9c85 100644 --- a/schema/common/defaults/configuration/kafka-exporter.yml +++ b/schema/common/defaults/configuration/kafka-exporter.yml @@ -12,7 +12,6 @@ specification: - '--group.filter=.*' # Regex that determines which consumer groups to collect. #- '--tls.insecure-skip-tls-verify' # If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. #- '--log.enable-sarama' # Turn on Sarama logging - - '--kafka.version=2.6.0' #- '--sasl.enabled' # Connect using SASL/PLAIN. 
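Since the hard-coded `--kafka.version` flag is dropped from `config_flags` here and pinned in the `kafka-exporter.service` template instead, a user-supplied `configuration/kafka-exporter` doc now looks roughly as follows. This is a minimal sketch that keeps only the flag visible in this hunk; the full default flag list is abbreviated:

```yaml
kind: configuration/kafka-exporter
title: "Kafka exporter"
name: default
specification:
  config_flags:
    - '--group.filter=.*'   # regex that selects which consumer groups to collect
    # '--kafka.version=...' is intentionally absent from config_flags:
    # the kafka-exporter.service template now appends --kafka.version=2.8.1
    # to ExecStart directly.
```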
#- '--sasl.handshake' # Only set this to false if using a non-Kafka SASL proxy #- '--sasl.username=""' diff --git a/schema/common/defaults/configuration/kafka.yml b/schema/common/defaults/configuration/kafka.yml index 1da6a76f96..30feadfdf9 100644 --- a/schema/common/defaults/configuration/kafka.yml +++ b/schema/common/defaults/configuration/kafka.yml @@ -2,72 +2,71 @@ kind: configuration/kafka title: "Kafka" name: default specification: - kafka_var: - enabled: True - admin: kafka - admin_pwd: epiphany - # javax_net_debug: all # uncomment to activate debugging, other debug options: https://colinpaice.blog/2020/04/05/using-java-djavax-net-debug-to-examine-data-flows-including-tls/ - security: - ssl: - enabled: False - port: 9093 - server: - local_cert_download_path: kafka-certs - keystore_location: /var/private/ssl/kafka.server.keystore.jks - truststore_location: /var/private/ssl/kafka.server.truststore.jks - cert_validity: 365 - passwords: - keystore: PasswordToChange - truststore: PasswordToChange - key: PasswordToChange - endpoint_identification_algorithm: HTTPS - client_auth: required - encrypt_at_rest: False - inter_broker_protocol: PLAINTEXT - authorization: - enabled: False - authorizer_class_name: kafka.security.auth.SimpleAclAuthorizer - allow_everyone_if_no_acl_found: False - super_users: - - tester01 - - tester02 - users: - - name: test_user - topic: test_topic - authentication: - enabled: False - authentication_method: certificates - sasl_mechanism_inter_broker_protocol: - sasl_enabled_mechanisms: PLAIN - sha: "b28e81705e30528f1abb6766e22dfe9dae50b1e1e93330c880928ff7a08e6b38ee71cbfc96ec14369b2dfd24293938702cab422173c8e01955a9d1746ae43f98" - port: 9092 - min_insync_replicas: 1 # Minimum number of replicas (ack write) - default_replication_factor: 1 # Minimum number of automatically created topics - offsets_topic_replication_factor: 1 # Minimum number of offsets topic (consider higher value for HA) - num_recovery_threads_per_data_dir: 1 # Minimum number of recovery threads per data dir - num_replica_fetchers: 1 # Minimum number of replica fetchers - replica_fetch_max_bytes: 1048576 - replica_socket_receive_buffer_bytes: 65536 - partitions: 8 # 100 x brokers x replicas for reasonable size cluster. Small clusters can be less - log_retention_hours: 168 # The minimum age of a log file to be eligible for deletion due to age - log_retention_bytes: -1 # -1 is no size limit only a time limit (log_retention_hours). This limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes. 
- offset_retention_minutes: 10080 # Offsets older than this retention period will be discarded - heap_opts: "-Xmx2G -Xms2G" - opts: "-Djavax.net.debug=all" - jmx_opts: - max_incremental_fetch_session_cache_slots: 1000 - controlled_shutdown_enable: true - group: kafka - user: kafka - conf_dir: /opt/kafka/config - data_dir: /var/lib/kafka - log_dir: /var/log/kafka - socket_settings: - network_threads: 3 # The number of threads handling network requests - io_threads: 8 # The number of threads doing disk I/O - send_buffer_bytes: 102400 # The send buffer (SO_SNDBUF) used by the socket server - receive_buffer_bytes: 102400 # The receive buffer (SO_RCVBUF) used by the socket server - request_max_bytes: 104857600 # The maximum size of a request that the socket server will accept (protection against OOM) + enabled: True + admin: kafka + admin_pwd: epiphany + # javax_net_debug: all # uncomment to activate debugging, other debug options: https://colinpaice.blog/2020/04/05/using-java-djavax-net-debug-to-examine-data-flows-including-tls/ + security: + ssl: + enabled: False + port: 9093 + server: + local_cert_download_path: kafka-certs + keystore_location: /var/private/ssl/kafka.server.keystore.jks + truststore_location: /var/private/ssl/kafka.server.truststore.jks + cert_validity: 365 + passwords: + keystore: PasswordToChange + truststore: PasswordToChange + key: PasswordToChange + endpoint_identification_algorithm: HTTPS + client_auth: required + encrypt_at_rest: False + inter_broker_protocol: PLAINTEXT + authorization: + enabled: False + authorizer_class_name: kafka.security.auth.SimpleAclAuthorizer + allow_everyone_if_no_acl_found: False + super_users: + - tester01 + - tester02 + users: + - name: test_user + topic: test_topic + authentication: + enabled: False + authentication_method: certificates + sasl_mechanism_inter_broker_protocol: + sasl_enabled_mechanisms: PLAIN + sha: "b28e81705e30528f1abb6766e22dfe9dae50b1e1e93330c880928ff7a08e6b38ee71cbfc96ec14369b2dfd24293938702cab422173c8e01955a9d1746ae43f98" + port: 9092 + min_insync_replicas: 1 # Minimum number of replicas (ack write) + default_replication_factor: 1 # Minimum number of automatically created topics + offsets_topic_replication_factor: 1 # Minimum number of offsets topic (consider higher value for HA) + num_recovery_threads_per_data_dir: 1 # Minimum number of recovery threads per data dir + num_replica_fetchers: 1 # Minimum number of replica fetchers + replica_fetch_max_bytes: 1048576 + replica_socket_receive_buffer_bytes: 65536 + partitions: 8 # 100 x brokers x replicas for reasonable size cluster. Small clusters can be less + log_retention_hours: 168 # The minimum age of a log file to be eligible for deletion due to age + log_retention_bytes: -1 # -1 is no size limit only a time limit (log_retention_hours). This limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes. 
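Because `log.retention.bytes` is enforced per partition, the comment above is easiest to read with concrete numbers. The values below are purely illustrative overrides (the shipped default keeps `log_retention_bytes: -1`, i.e. no size limit):

```yaml
# Illustrative override in a configuration/kafka doc
specification:
  partitions: 8                     # default partition count for auto-created topics
  log_retention_hours: 168          # time-based retention, applied independently
  log_retention_bytes: 1073741824   # 1 GiB kept per partition, not per topic
# Approximate size cap for one topic: 8 partitions x 1 GiB = 8 GiB;
# old segments are deleted once either the size or the time limit is exceeded.
```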
+ offset_retention_minutes: 10080 # Offsets older than this retention period will be discarded + heap_opts: "-Xmx2G -Xms2G" + opts: "-Djavax.net.debug=all" + jmx_opts: + max_incremental_fetch_session_cache_slots: 1000 + controlled_shutdown_enable: true + group: kafka + user: kafka + conf_dir: /opt/kafka/config + data_dir: /var/lib/kafka + log_dir: /var/log/kafka + socket_settings: + network_threads: 3 # The number of threads handling network requests + io_threads: 8 # The number of threads doing disk I/O + send_buffer_bytes: 102400 # The send buffer (SO_SNDBUF) used by the socket server + receive_buffer_bytes: 102400 # The receive buffer (SO_RCVBUF) used by the socket server + request_max_bytes: 104857600 # The maximum size of a request that the socket server will accept (protection against OOM) zookeeper_set_acl: false zookeeper_hosts: "{{ groups['zookeeper']|join(':2181,') }}:2181" jmx_exporter_user: jmx-exporter diff --git a/schema/common/validation/configuration/kafka.yml b/schema/common/validation/configuration/kafka.yml index 32b7cb5b98..ebbd14ba68 100644 --- a/schema/common/validation/configuration/kafka.yml +++ b/schema/common/validation/configuration/kafka.yml @@ -3,147 +3,144 @@ title: "Kafka specification schema" description: "Kafka specification schema" type: object properties: - kafka_var: + enabled: + type: boolean + admin: + type: string + admin_pwd: + type: string + javax_net_debug: + type: string + security: type: object properties: - enabled: - type: boolean - admin: - type: string - admin_pwd: - type: string - javax_net_debug: - type: string - security: + ssl: type: object properties: - ssl: + enabled: + type: boolean + port: + type: integer + server: type: object properties: - enabled: - type: boolean - port: + local_cert_download_path: + type: string + keystore_location: + type: string + truststore_location: + type: string + cert_validity: type: integer - server: + passwords: type: object properties: - local_cert_download_path: + keystore: type: string - keystore_location: + truststore: type: string - truststore_location: + key: type: string - cert_validity: - type: integer - passwords: - type: object - properties: - keystore: - type: string - truststore: - type: string - key: - type: string - endpoint_identification_algorithm: - type: string - client_auth: - type: string - encrypt_at_rest: + endpoint_identification_algorithm: + type: string + client_auth: + type: string + encrypt_at_rest: + type: boolean + inter_broker_protocol: + type: string + authorization: + type: object + properties: + enabled: type: boolean - inter_broker_protocol: + authorizer_class_name: type: string - authorization: - type: object - properties: - enabled: - type: boolean - authorizer_class_name: - type: string - allow_everyone_if_no_acl_found: - type: boolean - super_users: - type: array - items: + allow_everyone_if_no_acl_found: + type: boolean + super_users: + type: array + items: + type: string + users: + type: array + items: + type: object + properties: + name: type: string - users: - type: array - items: - type: object - properties: - name: - type: string - topic: - type: string - authentication: - type: object - properties: - enabled: - type: boolean - authentication_method: - type: string - sasl_mechanism_inter_broker_protocol: - type: 'null' - sasl_enabled_mechanisms: - type: string - sha: - type: string - port: - type: integer - min_insync_replicas: - type: integer - default_replication_factor: - type: integer - offsets_topic_replication_factor: - type: integer - 
num_recovery_threads_per_data_dir: - type: integer - num_replica_fetchers: - type: integer - replica_fetch_max_bytes: - type: integer - replica_socket_receive_buffer_bytes: - type: integer - partitions: + topic: + type: string + authentication: + type: object + properties: + enabled: + type: boolean + authentication_method: + type: string + sasl_mechanism_inter_broker_protocol: + type: 'null' + sasl_enabled_mechanisms: + type: string + sha: + type: string + port: + type: integer + min_insync_replicas: + type: integer + default_replication_factor: + type: integer + offsets_topic_replication_factor: + type: integer + num_recovery_threads_per_data_dir: + type: integer + num_replica_fetchers: + type: integer + replica_fetch_max_bytes: + type: integer + replica_socket_receive_buffer_bytes: + type: integer + partitions: + type: integer + log_retention_hours: + type: integer + log_retention_bytes: + type: integer + offset_retention_minutes: + type: integer + heap_opts: + type: string + opts: + type: string + jmx_opts: + type: 'null' + max_incremental_fetch_session_cache_slots: + type: integer + controlled_shutdown_enable: + type: boolean + group: + type: string + user: + type: string + conf_dir: + type: string + data_dir: + type: string + log_dir: + type: string + socket_settings: + type: object + properties: + network_threads: type: integer - log_retention_hours: + io_threads: type: integer - log_retention_bytes: + send_buffer_bytes: type: integer - offset_retention_minutes: + receive_buffer_bytes: type: integer - heap_opts: - type: string - opts: - type: string - jmx_opts: - type: 'null' - max_incremental_fetch_session_cache_slots: + request_max_bytes: type: integer - controlled_shutdown_enable: - type: boolean - group: - type: string - user: - type: string - conf_dir: - type: string - data_dir: - type: string - log_dir: - type: string - socket_settings: - type: object - properties: - network_threads: - type: integer - io_threads: - type: integer - send_buffer_bytes: - type: integer - receive_buffer_bytes: - type: integer - request_max_bytes: - type: integer zookeeper_set_acl: type: boolean zookeeper_hosts: