diff --git a/README.md b/README.md index b996c4b75d..7fe94fdcc5 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ Epiphany at its core is a full automation of Kubernetes and Docker plus addition - Kafka or RabbitMQ for high speed messaging/events - Prometheus and Alertmanager for monitoring with Graphana for visualization -- Elasticsearch and Kibana for centralized logging (OpenDistro) +- OpenSearch for centralized logging - HAProxy for loadbalancing -- Postgres and Elasticsearch for data storage +- Postgres and OpenSearch for data storage - KeyCloak for authentication - Helm as package manager for Kubernetes diff --git a/ansible/playbooks/backup_logging.yml b/ansible/playbooks/backup_logging.yml index c1252ec696..34c40726f4 100644 --- a/ansible/playbooks/backup_logging.yml +++ b/ansible/playbooks/backup_logging.yml @@ -14,10 +14,10 @@ name: component_vars - import_role: name: backup - tasks_from: logging_elasticsearch_snapshot + tasks_from: logging_opensearch_snapshot - import_role: name: backup - tasks_from: logging_elasticsearch_etc + tasks_from: logging_opensearch_conf - hosts: kibana[0] gather_facts: true @@ -28,10 +28,10 @@ - when: specification.components.logging.enabled | default(false) block: - include_vars: - file: roles/kibana/vars/main.yml + file: roles/opensearch_dashboards/vars/main.yml name: component_vars - import_role: name: backup - tasks_from: logging_kibana_etc + tasks_from: logging_opensearch_dashboards_conf vars: snapshot_name: "{{ hostvars[groups.logging.0].snapshot_name }}" diff --git a/ansible/playbooks/filebeat.yml b/ansible/playbooks/filebeat.yml index d2295b29c3..952fefa1aa 100644 --- a/ansible/playbooks/filebeat.yml +++ b/ansible/playbooks/filebeat.yml @@ -1,7 +1,7 @@ --- # Ansible playbook that installs and configures Filebeat -- hosts: opendistro_for_elasticsearch:logging:kibana # to gather facts +- hosts: opensearch:logging:opensearch_dashboards # to gather facts tasks: [] - hosts: filebeat diff --git a/ansible/playbooks/kibana.yml b/ansible/playbooks/kibana.yml deleted file mode 100644 index 882d4c66ff..0000000000 --- a/ansible/playbooks/kibana.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Ansible playbook that makes sure the base items for all nodes are installed - -- hosts: all - gather_facts: yes - tasks: [ ] - -- hosts: kibana - become: true - become_method: sudo - roles: - - kibana diff --git a/ansible/playbooks/opendistro_for_elasticsearch.yml b/ansible/playbooks/opendistro_for_elasticsearch.yml deleted file mode 100644 index 9ec9a72ed6..0000000000 --- a/ansible/playbooks/opendistro_for_elasticsearch.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# Ansible playbook for installing Elasticsearch - -- hosts: opendistro_for_elasticsearch - become: true - become_method: sudo - roles: - - opendistro_for_elasticsearch - vars: - current_group_name: "opendistro_for_elasticsearch" diff --git a/ansible/playbooks/opensearch.yml b/ansible/playbooks/opensearch.yml new file mode 100644 index 0000000000..b4a6e188df --- /dev/null +++ b/ansible/playbooks/opensearch.yml @@ -0,0 +1,10 @@ +--- +# Ansible playbook for installing OpenSearch + +- hosts: opensearch + become: true + become_method: sudo + roles: + - opensearch + vars: + current_group_name: "opensearch" diff --git a/ansible/playbooks/opensearch_dashboards.yml b/ansible/playbooks/opensearch_dashboards.yml new file mode 100644 index 0000000000..66c8bd5ed8 --- /dev/null +++ b/ansible/playbooks/opensearch_dashboards.yml @@ -0,0 +1,10 @@ +--- +# Ansible playbook for installing OpenSearch Dashboards +- hosts: repository # 
to gather facts + tasks: [] + +- hosts: opensearch_dashboards + become: true + become_method: sudo + roles: + - opensearch_dashboards diff --git a/ansible/playbooks/recovery_logging.yml b/ansible/playbooks/recovery_logging.yml index 796d1c0bae..c7cd956778 100644 --- a/ansible/playbooks/recovery_logging.yml +++ b/ansible/playbooks/recovery_logging.yml @@ -13,10 +13,10 @@ name: component_vars - import_role: name: recovery - tasks_from: logging_elasticsearch_etc + tasks_from: logging_opensearch_conf - import_role: name: recovery - tasks_from: logging_elasticsearch_snapshot + tasks_from: logging_opensearch_snapshot - hosts: kibana[0] gather_facts: true @@ -27,8 +27,8 @@ - when: specification.components.logging.enabled | default(false) block: - include_vars: - file: roles/kibana/vars/main.yml + file: roles/opensearch_dashboards/vars/main.yml name: component_vars - import_role: name: recovery - tasks_from: logging_kibana_etc + tasks_from: logging_opensearch_dashboards_conf diff --git a/ansible/playbooks/roles/backup/defaults/main.yml b/ansible/playbooks/roles/backup/defaults/main.yml index ca3f722d86..51cc26574b 100644 --- a/ansible/playbooks/roles/backup/defaults/main.yml +++ b/ansible/playbooks/roles/backup/defaults/main.yml @@ -2,5 +2,5 @@ backup_dir: /epibackup backup_destination_dir: "{{ backup_dir }}/mounted" backup_destination_host: "{{ groups.repository[0] if (custom_repository_url | default(false)) else (resolved_repository_hostname | default(groups.repository[0])) }}" -elasticsearch_snapshot_repository_name: epiphany -elasticsearch_snapshot_repository_location: /var/lib/elasticsearch-snapshots +opensearch_snapshot_repository_name: epiphany +opensearch_snapshot_repository_location: /var/lib/opensearch-snapshots diff --git a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml b/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml deleted file mode 100644 index 67e55ed34d..0000000000 --- a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml +++ /dev/null @@ -1,89 +0,0 @@ ---- -- name: Include default vars from opendistro_for_elasticsearch role - include_vars: - file: roles/opendistro_for_elasticsearch/defaults/main.yml - name: odfe - -- name: Set helper facts - set_fact: - elasticsearch_endpoint: >- - https://{{ ansible_default_ipv4.address }}:9200 - snapshot_name: >- - {{ ansible_date_time.iso8601_basic_short | replace('T','-') }} - vars: - uri_template: &uri - client_cert: "{{ odfe.certificates.dirs.certs }}/{{ odfe.certificates.files.admin.cert.filename }}" - client_key: "{{ odfe.certificates.dirs.certs }}/{{ odfe.certificates.files.admin.key.filename }}" - validate_certs: false - body_format: json - -- debug: var=snapshot_name - -- name: Check cluster health - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_cluster/health" - method: GET - register: uri_response - until: uri_response is success - retries: 12 - delay: 5 - -- name: Ensure snapshot repository is defined - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}" - method: PUT - body: - type: fs - settings: - location: "{{ elasticsearch_snapshot_repository_location }}" - compress: true - -- name: Trigger snapshot creation - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}" - method: PUT - -- name: Wait (up to 12h) for snapshot completion - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ 
elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}" - method: GET - register: uri_response - until: (uri_response.json.snapshots | selectattr('snapshot', 'equalto', snapshot_name) | first).state == "SUCCESS" - retries: "{{ (12 * 3600 // 10) | int }}" # 12h - delay: 10 - -- name: Find all snapshots - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/_all" - method: GET - register: uri_response - -- name: Delete old snapshots - uri: - <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ item }}" - method: DELETE - loop: >- - {{ uri_response.json.snapshots | map(attribute='snapshot') | reject('equalto', snapshot_name) | list }} - -- name: Create snapshot archive - import_tasks: common/create_snapshot_archive.yml - vars: - snapshot_prefix: "elasticsearch_snapshot" - dirs_to_archive: - - "{{ elasticsearch_snapshot_repository_location }}/" - -- name: Create snapshot checksum - import_tasks: common/create_snapshot_checksum.yml - -- name: Transfer artifacts via rsync - import_tasks: common/download_via_rsync.yml - vars: - artifacts: - - "{{ snapshot_path }}" - - "{{ snapshot_path }}.sha1" diff --git a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml similarity index 63% rename from ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml rename to ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml index b9e2bf79db..65dd5b88ad 100644 --- a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml +++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml @@ -1,4 +1,14 @@ --- +- name: Include default vars from opensearch role + include_vars: + file: roles/opensearch/defaults/main.yml + name: opensearch_defaults + +- name: Include vars from opensearch role + include_vars: + file: roles/opensearch/vars/main.yml + name: opensearch_vars + - name: Assert that the snapshot_name fact is defined and valid assert: that: @@ -12,9 +22,9 @@ - name: Create snapshot archive import_tasks: common/create_snapshot_archive.yml vars: - snapshot_prefix: "elasticsearch_etc" + snapshot_prefix: "opensearch_conf" dirs_to_archive: - - /etc/elasticsearch/ + - "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}" - name: Create snapshot checksum import_tasks: common/create_snapshot_checksum.yml diff --git a/ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml similarity index 68% rename from ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml rename to ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml index 4b774e7d4f..c0a045bf4d 100644 --- a/ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml +++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml @@ -9,12 +9,17 @@ - debug: var=snapshot_name +- name: Include vars from opensearch_dashboards role + include_vars: + file: roles/opensearch_dashboards/vars/main.yml + name: opensearch_dashboards_vars + - name: Create snapshot archive import_tasks: common/create_snapshot_archive.yml vars: - snapshot_prefix: "kibana_etc" + snapshot_prefix: "opensearch_dashboards_conf_dir" dirs_to_archive: - - /etc/kibana/ + - "{{ opensearch_dashboards_vars.specification.paths.opensearch_dashboards_conf_dir }}" - name: Create snapshot checksum import_tasks: common/create_snapshot_checksum.yml diff 
--git a/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml new file mode 100644 index 0000000000..f15c850f24 --- /dev/null +++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml @@ -0,0 +1,96 @@ +--- +- name: Include default vars from opensearch role + include_vars: + file: roles/opensearch/defaults/main.yml + name: opensearch_defaults + +- name: Set helper facts + set_fact: + opensearch_endpoint: >- + https://{{ ansible_default_ipv4.address }}:9200 + snapshot_name: >- + {{ ansible_date_time.iso8601_basic_short | replace('T','-') }} + vars: + uri_template: &uri + client_cert: "{{ opensearch_defaults.certificates.dirs.certs }}/{{ opensearch_defaults.certificates.files.admin.cert.filename }}" + client_key: "{{ opensearch_defaults.certificates.dirs.certs }}/{{ opensearch_defaults.certificates.files.admin.key.filename }}" + validate_certs: false + body_format: json + +- name: Check cluster health + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_cluster/health" + method: GET + return_content: yes + register: cluster_status + until: cluster_status.json.status + retries: 60 + delay: 1 + +- name: No backup warning + when: not cluster_status.json.number_of_nodes == 1 + debug: + msg: "[WARNING] No snapshot backup created as only single-node cluster backup is supported." + +- name: Snapshot backup + when: cluster_status.json.number_of_nodes == 1 # https://github.com/epiphany-platform/epiphany/blob/develop/docs/home/howto/BACKUP.md#logging + block: + - name: Ensure snapshot repository is defined + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}" + method: PUT + body: + type: fs + settings: + location: "{{ opensearch_snapshot_repository_location }}" + compress: true + + - name: Trigger snapshot creation + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}" + method: PUT + + - name: Wait (up to 12h) for snapshot completion + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}" + method: GET + register: uri_response + until: (uri_response.json.snapshots | selectattr('snapshot', 'equalto', snapshot_name) | first).state == "SUCCESS" + retries: "{{ (12 * 3600 // 10) | int }}" # 12h + delay: 10 + + - name: Find all snapshots + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/_all" + method: GET + register: uri_response + + - name: Delete old snapshots + uri: + <<: *uri + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ item }}" + method: DELETE + loop: >- + {{ uri_response.json.snapshots | map(attribute='snapshot') | reject('equalto', snapshot_name) | list }} + + - name: Create snapshot archive + import_tasks: common/create_snapshot_archive.yml + vars: + snapshot_prefix: "opensearch_snapshot" + dirs_to_archive: + - "{{ opensearch_snapshot_repository_location }}/" + + - name: Create snapshot checksum + import_tasks: common/create_snapshot_checksum.yml + + - name: Transfer artifacts via rsync + import_tasks: common/download_via_rsync.yml + vars: + artifacts: + - "{{ snapshot_path }}" + - "{{ snapshot_path }}.sha1" diff --git a/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml b/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml index 8df02d6c8a..d743ae642f 100644 --- 
a/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml
+++ b/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Include installation task
-  include_tasks: install-es-curator-{{ ansible_os_family }}.yml
+  include_tasks: install-ops-curator-{{ ansible_os_family }}.yml
 
 - name: Include configuration tasks
-  include_tasks: configure-cron-jobs.yml
\ No newline at end of file
+  include_tasks: configure-cron-jobs.yml
diff --git a/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml b/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
index cb7e2a723e..159ebc1b74 100644
--- a/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
+++ b/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
@@ -7,7 +7,7 @@
     name: postgresql_defaults
   when: "'postgresql' in group_names"
 
-# Do not select Kibana configured to use ES deployed by 'opendistro_for_elasticsearch' role
+# Do not select an OpenSearch Dashboards host configured to use OpenSearch deployed by the 'opensearch' role
 - name: Set value for setup.kibana.host
   set_fact:
     setup_kibana_host: >-
diff --git a/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2 b/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
index a6715edf20..fccc15e7be 100644
--- a/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
+++ b/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
@@ -169,16 +169,21 @@ setup.template.settings:
 # These settings control loading the sample dashboards to the Kibana index. Loading
 # the dashboards is disabled by default and can be enabled either by setting the
 # options here or by using the `setup` command.
-{% set dashboards_enabled = is_upgrade_run | ternary(existing_setup_dashboards.enabled, specification.kibana.dashboards.enabled) %}
-{% if dashboards_enabled | lower == 'auto' %}
- {% if group_names | intersect(['kibana', 'logging']) | count == 2 %}
-setup.dashboards.enabled: true
- {% else %}
+#
+# The logic below is commented out as a workaround for a Filebeat issue until the OPS team resolves it.
+# More info: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656#issuecomment-978036236
+# A static value is used instead:
 setup.dashboards.enabled: false
- {% endif %}
-{% else %}
-setup.dashboards.enabled: {{ dashboards_enabled | lower }}
-{% endif %}
+# {% set dashboards_enabled = is_upgrade_run | ternary(existing_setup_dashboards.enabled, specification.kibana.dashboards.enabled) %}
+# {% if dashboards_enabled | lower == 'auto' %}
+# {% if group_names | intersect(['kibana', 'logging']) | count == 2 %}
+# setup.dashboards.enabled: true
+# {% else %}
+#setup.dashboards.enabled: false
+# {% endif %}
+#{% else %}
+#setup.dashboards.enabled: {{ dashboards_enabled | lower }}
+#{% endif %}
 
 # The Elasticsearch index name.
 # This setting overwrites the index name defined in the dashboards and index pattern.
@@ -369,14 +374,14 @@ processors:
 #monitoring.enabled: false
 
 # Sets the UUID of the Elasticsearch cluster under which monitoring data for this
-# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# Filebeat instance will appear in the Stack Monitoring UI. If output.OpenSearch
 # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
 #monitoring.cluster_uuid:
 
 # Uncomment to send the metrics to Elasticsearch. Most settings from the
 # Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster. -# Any setting that is not set is automatically inherited from the Elasticsearch +# Any setting that is not set is automatically inherited from the OpenSearch # output configuration, so if you have the Elasticsearch output configured such # that it is pointing to your Elasticsearch monitoring cluster, you can simply # uncomment the following line. diff --git a/ansible/playbooks/roles/kibana/defaults/main.yml b/ansible/playbooks/roles/kibana/defaults/main.yml deleted file mode 100644 index f07c1f3457..0000000000 --- a/ansible/playbooks/roles/kibana/defaults/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -kibana_version: - RedHat: "1.13.1" - Debian: "1.13.1" - -# Required and used for upgrade Open Distro for Elasticsearch - Kibana: -specification: - kibana_log_dir: /var/log/kibana diff --git a/ansible/playbooks/roles/kibana/tasks/main.yml b/ansible/playbooks/roles/kibana/tasks/main.yml deleted file mode 100644 index be53a44420..0000000000 --- a/ansible/playbooks/roles/kibana/tasks/main.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -- name: Install Kibana package - package: - name: "{{ _packages[ansible_os_family] }}" - state: present - vars: - _packages: - Debian: - - opendistroforelasticsearch-kibana={{ kibana_version[ansible_os_family] }} - RedHat: - - opendistroforelasticsearch-kibana-{{ kibana_version[ansible_os_family] }} - module_defaults: - yum: { lock_timeout: "{{ yum_lock_timeout }}" } - -- name: Include logging configuration tasks - include_tasks: setup-logging.yml - -- name: Load variables from logging/opendistro_for_elasticsearch role - when: context is undefined or context != "upgrade" - block: - - name: Load variables from logging role - include_vars: - file: roles/logging/vars/main.yml - name: opendistro_for_logging_vars - when: "'logging' in group_names" - - - name: Load variables from opendistro_for_elasticsearch role - include_vars: - file: roles/opendistro_for_elasticsearch/vars/main.yml - name: opendistro_for_data_vars - when: "'opendistro_for_elasticsearch' in group_names" - -- name: Update Kibana configuration file - template: - backup: yes - src: kibana.yml.j2 - dest: /etc/kibana/kibana.yml - owner: kibana - group: root - mode: u=rw,go= - register: change_config - -- name: Restart Kibana service - systemd: - name: kibana - state: restarted - when: change_config.changed - -- name: Start kibana service - service: - name: kibana - state: started - enabled: yes - -- name: Wait for kibana to start listening - wait_for: - host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}" - port: 5601 - delay: 5 - -- name: Wait for Kibana to be ready - uri: - url: http://{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}:5601/api/status - method: GET - register: response - until: "'kbn_name' in response and response.status == 200" - retries: 120 - delay: 2 diff --git a/ansible/playbooks/roles/kibana/tasks/setup-logging.yml b/ansible/playbooks/roles/kibana/tasks/setup-logging.yml deleted file mode 100644 index d87de424ce..0000000000 --- a/ansible/playbooks/roles/kibana/tasks/setup-logging.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Create log directory for Kibana - file: path={{ specification.kibana_log_dir }} state=directory - -- name: Create logfile for Kibana - copy: - dest: "{{ specification.kibana_log_dir }}/kibana.log" - owner: kibana - group: kibana - mode: 0644 - force: no - content: "" - -- name: Set permissions on logfile for Kibana - file: - path: 
"{{ specification.kibana_log_dir }}/kibana.log" - owner: kibana - group: kibana - mode: 0644 - -- name: Copy logrotate config - template: - dest: /etc/logrotate.d/kibana - owner: root - group: root - mode: 0644 - src: logrotate.conf.j2 diff --git a/ansible/playbooks/roles/kibana/templates/kibana.yml.j2 b/ansible/playbooks/roles/kibana/templates/kibana.yml.j2 deleted file mode 100644 index e27bf5112d..0000000000 --- a/ansible/playbooks/roles/kibana/templates/kibana.yml.j2 +++ /dev/null @@ -1,64 +0,0 @@ -# {{ ansible_managed }} - -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). -# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -# Description: -# Default Kibana configuration for Open Distro. - -server.host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}" -elasticsearch.hosts: -{% if 'logging' in group_names %} - # Logging hosts: - {% for host in groups['logging'] %} - - "https://{{hostvars[host]['ansible_hostname']}}:9200" - {% endfor %} -{% elif 'opendistro_for_elasticsearch' in group_names %} - # Data hosts: - {% for host in groups['opendistro_for_elasticsearch'] %} - - "https://{{hostvars[host]['ansible_hostname']}}:9200" - {% endfor %} -{% endif %} - -elasticsearch.ssl.verificationMode: none -elasticsearch.username: kibanaserver -{% set password = 'kibanaserver' %} -{% if context is undefined or context != 'upgrade' -%} - {# mode: apply -#} - {% if 'logging' in group_names -%} - {% set password = opendistro_for_logging_vars.specification.kibanaserver_password -%} - {% elif 'opendistro_for_elasticsearch' in group_names -%} - {% set password = opendistro_for_data_vars.specification.kibanaserver_password -%} - {% endif %} -{% else -%} - {# mode: upgrade -#} - {% set password = existing_es_password %} -{% endif %} -elasticsearch.password: {{ "'%s'" % password | replace("'","''") }} -elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"] - -# Enables you to specify a file where Kibana stores log output. 
-logging.dest: {{ specification.kibana_log_dir }}/kibana.log - -opendistro_security.multitenancy.enabled: true -opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"] -opendistro_security.readonly_mode.roles: ["kibana_read_only"] - -# Provided with 1.10.1 version: -# https://opendistro.github.io/for-elasticsearch-docs/docs/upgrade/1-10-1/ -# Use this setting if you are running kibana without https -opendistro_security.cookie.secure: false - -newsfeed.enabled: false -telemetry.optIn: false -telemetry.enabled: false diff --git a/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2 b/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2 deleted file mode 100644 index d550d97e19..0000000000 --- a/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2 +++ /dev/null @@ -1,8 +0,0 @@ -{{ specification.kibana_log_dir }}/*.log { - rotate 5 - daily - compress - missingok - notifempty - delaycompress -} diff --git a/ansible/playbooks/roles/logging/tasks/main.yml b/ansible/playbooks/roles/logging/tasks/main.yml index 5671e42791..4c615900a2 100644 --- a/ansible/playbooks/roles/logging/tasks/main.yml +++ b/ansible/playbooks/roles/logging/tasks/main.yml @@ -10,8 +10,8 @@ run_once: true no_log: true # contains sensitive data -- name: Install and configure OpenDistro for Elasticsearch +- name: Install and configure OpenSearch import_role: - name: opendistro_for_elasticsearch + name: opensearch vars: - specification: "{{ logging_vars.specification }}" # to override opendistro_for_elasticsearch specification + specification: "{{ logging_vars.specification }}" # to override OpenSearch specification diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml deleted file mode 100644 index 4bed42d55f..0000000000 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Install elasticsearch-oss packages - package: - name: "{{ _packages[ansible_os_family] }}" - state: present - vars: - _packages: - Debian: - - elasticsearch-oss={{ versions[ansible_os_family].elasticsearch_oss }} - RedHat: - - elasticsearch-oss-{{ versions[ansible_os_family].elasticsearch_oss }} - register: install_elasticsearch_package - module_defaults: - yum: { lock_timeout: "{{ yum_lock_timeout }}" } diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml deleted file mode 100644 index d38b2ebcd3..0000000000 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# NOTE: Keep in mind compatibility matrix for Open Distro https://opendistro.github.io/for-elasticsearch-docs/docs/install/plugins/#plugin-compatibility -- name: Install opendistro-* packages - package: - name: "{{ _packages[ansible_os_family] }}" - state: present - vars: - _packages: - Debian: - - opendistro-alerting={{ versions[ansible_os_family].opendistro }} - - opendistro-index-management={{ versions[ansible_os_family].opendistro }} - - opendistro-job-scheduler={{ versions[ansible_os_family].opendistro }} - - opendistro-performance-analyzer={{ versions[ansible_os_family].opendistro }} - - opendistro-security={{ versions[ansible_os_family].opendistro }} - - opendistro-sql={{ versions[ansible_os_family].opendistro }} - RedHat: - - opendistro-alerting-{{ versions[ansible_os_family].opendistro 
}} - - opendistro-index-management-{{ versions[ansible_os_family].opendistro }} - - opendistro-job-scheduler-{{ versions[ansible_os_family].opendistro }} - - opendistro-performance-analyzer-{{ versions[ansible_os_family].opendistro }} - - opendistro-security-{{ versions[ansible_os_family].opendistro }} - - opendistro-sql-{{ versions[ansible_os_family].opendistro }} - register: install_opendistro_packages - module_defaults: - yum: { lock_timeout: "{{ yum_lock_timeout }}" } diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml deleted file mode 100644 index 917c2e52d7..0000000000 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -- name: Log4j patch - block: - - name: "opendistro_for_elasticsearch : Log4j patch | Get archive" - include_role: - name: download - tasks_from: download_file - vars: - file_name: "{{ log4j_file_name }}" - - - name: Log4j patch | Extract archive - unarchive: - dest: /tmp/ - src: "{{ download_directory }}/{{ log4j_file_name }}" - remote_src: true - list_files: true - register: unarchive_list_files - - - name: Log4j patch | Copy new jars - register: log4j_patch - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: elasticsearch - group: root - mode: u=rw,g=r,o= - remote_src: true - loop: - - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/lib/ } - - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/performance-analyzer-rca/lib/ } - - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/ } - - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/lib/ } - - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/performance-analyzer-rca/lib/ } - - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/ } - - { src: "{{ download_directory }}/{{ log4j_slfj_impl }}", dest: /usr/share/elasticsearch/plugins/opendistro_security/ } - vars: - log4j_api: "{{ unarchive_list_files.files | select('contains', 'log4j-api-2.17.1.jar') | first }}" - log4j_core: "{{ unarchive_list_files.files | select('contains', 'log4j-core-2.17.1.jar') | first }}" - log4j_slfj_impl: "{{ unarchive_list_files.files | select('contains', 'log4j-slf4j-impl-2.17.1.jar') | first }}" - - - name: Log4j patch - cleanup - block: - - name: Log4j patch | Remove old jars - file: - state: absent - path: "{{ item }}" - loop: - - /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/log4j-api-2.13.0.jar - - /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/log4j-core-2.13.0.jar - - /usr/share/elasticsearch/performance-analyzer-rca/lib/log4j-api-2.13.0.jar - - /usr/share/elasticsearch/performance-analyzer-rca/lib/log4j-core-2.13.0.jar - - /usr/share/elasticsearch/lib/log4j-api-2.11.1.jar - - /usr/share/elasticsearch/lib/log4j-core-2.11.1.jar - - /usr/share/elasticsearch/plugins/opendistro_security/log4j-slf4j-impl-2.11.1.jar - - - name: Log4j patch | Delete temporary dir - file: - dest: "{{ download_directory }}/{{ _archive_root_dir }}" - state: absent - vars: - _archive_root_dir: >- - {{ unarchive_list_files.files | first | dirname }} - 
-- name: Restart opendistro-performance-analyzer service - systemd: - name: opendistro-performance-analyzer - state: restarted - when: log4j_patch.changed diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml b/ansible/playbooks/roles/opensearch/defaults/main.yml similarity index 76% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml rename to ansible/playbooks/roles/opensearch/defaults/main.yml index cbde5b2a67..f8ed73e3ec 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml +++ b/ansible/playbooks/roles/opensearch/defaults/main.yml @@ -1,18 +1,17 @@ --- # This file is meant to be also used by upgrade role - -versions: - RedHat: - elasticsearch_oss: "7.10.2" - opendistro: "1.13.*" - Debian: - elasticsearch_oss: "7.10.2" - opendistro: "1.13.*" +file_name_version: + opensearch: + x86_64: opensearch-1.2.4-linux-x64.tar.gz + aarch64: opensearch-1.2.4-linux-arm64.tar.gz + opensearch_perftop: + x86_64: opensearch-perf-top-1.2.0.0-linux-x64.zip + # Perftop is not supported on ARM (https://github.com/opensearch-project/perftop/issues/26) certificates: dirs: - certs: /etc/elasticsearch - ca_key: /etc/elasticsearch/private - csr: /etc/elasticsearch/csr + certs: /usr/share/opensearch/config + ca_key: /usr/share/opensearch/config + csr: /usr/share/opensearch/config dn_attributes_order: ['CN', 'OU', 'O', 'L', 'S', 'C', 'DC'] files: demo: @@ -24,8 +23,8 @@ certificates: node: cert: esnode.pem key: esnode-key.pem - opendistro_security: - allow_unsafe_democertificates: false # if 'false' all demo files must be removed to start Elasticsearch + opensearch_security: + allow_unsafe_democertificates: false # if 'false' all demo files must be removed to start OpenSearch common: subject: &common-subject O: Epiphany diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/meta/main.yml b/ansible/playbooks/roles/opensearch/meta/main.yml similarity index 100% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/meta/main.yml rename to ansible/playbooks/roles/opensearch/meta/main.yml diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml b/ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml similarity index 73% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml rename to ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml index f60cf05e27..43d8026775 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml +++ b/ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml @@ -3,19 +3,19 @@ - name: Ensure snapshot folder exists file: - path: "{{ specification.paths.repo }}/" + path: "{{ specification.paths.opensearch_repo }}/" state: directory - owner: elasticsearch - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" + group: "{{ specification.opensearch_os_group }}" mode: u=rwx,go= - name: Provide JVM configuration file template: backup: yes src: jvm.options.j2 - dest: /etc/elasticsearch/jvm.options - owner: root - group: elasticsearch + dest: "{{ specification.paths.opensearch_conf_dir }}/jvm.options" + owner: "{{ specification.opensearch_os_user }}" + group: "{{ specification.opensearch_os_group }}" mode: ug=rw,o= register: change_jvm_config vars: @@ -31,36 +31,36 @@ - include_tasks: generate-certs.yml -- name: Provide Elasticsearch configuration file +- name: Provide OpenSearch configuration file template: backup: yes - src: elasticsearch.yml.j2 
- dest: /etc/elasticsearch/elasticsearch.yml - owner: root - group: elasticsearch + src: opensearch.yml.j2 + dest: "{{ specification.paths.opensearch_conf_dir }}/opensearch.yml" + owner: "{{ specification.opensearch_os_user }}" + group: "{{ specification.opensearch_os_group }}" mode: ug=rw,o= register: change_config vars: node_cert_filename: http: >- - {{ existing_es_config['opendistro_security.ssl.http.pemcert_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.http.pemcert_filepath'] if (is_upgrade_run) else certificates.files.node.cert.filename }} transport: >- - {{ existing_es_config['opendistro_security.ssl.transport.pemcert_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.transport.pemcert_filepath'] if (is_upgrade_run) else certificates.files.node.cert.filename }} node_key_filename: http: >- - {{ existing_es_config['opendistro_security.ssl.http.pemkey_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.http.pemkey_filepath'] if (is_upgrade_run) else certificates.files.node.key.filename }} transport: >- - {{ existing_es_config['opendistro_security.ssl.transport.pemkey_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.transport.pemkey_filepath'] if (is_upgrade_run) else certificates.files.node.key.filename }} root_ca_cert_filename: http: >- - {{ existing_es_config['opendistro_security.ssl.http.pemtrustedcas_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.http.pemtrustedcas_filepath'] if (is_upgrade_run) else certificates.files.root_ca.cert.filename }} transport: >- - {{ existing_es_config['opendistro_security.ssl.transport.pemtrustedcas_filepath'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.ssl.transport.pemtrustedcas_filepath'] if (is_upgrade_run) else certificates.files.root_ca.cert.filename }} _epiphany_subjects: admin: "{{ certificates.files.admin.cert.subject }}" @@ -76,7 +76,7 @@ {{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node)) | map('join','=') | join(',') }} admin_dn: >- - {{ existing_es_config['opendistro_security.authcz.admin_dn'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.authcz.admin_dn'] if (is_upgrade_run) else [ _epiphany_DNs.admin ] }} _epiphany_nodes_dn: >- {%- if groups[current_group_name] | length > 1 -%} @@ -90,55 +90,44 @@ {%- if not loop.last -%},{%- else -%}]{%- endif -%} {%- endfor -%} nodes_dn: >- - {{ existing_es_config['opendistro_security.nodes_dn'] if (is_upgrade_run) else + {{ existing_es_config['opensearch_security.nodes_dn'] if (is_upgrade_run) else _epiphany_nodes_dn }} - opendistro_security_allow_unsafe_democertificates: "{{ certificates.files.demo.opendistro_security.allow_unsafe_democertificates }}" + opensearch_security_allow_unsafe_democertificates: "{{ certificates.files.demo.opensearch_security.allow_unsafe_democertificates }}" http_port: "{{ is_upgrade_run | ternary(existing_es_config['http.port'], ports.http) }}" transport_port: "{{ is_upgrade_run | ternary(existing_es_config['transport.port'], ports.transport) }}" -# When 'opendistro_security.allow_unsafe_democertificates' is set to 'false' all demo certificate files must be removed, -# otherwise elasticsearch service doesn't start. +# When 'opensearch_security.allow_unsafe_democertificates' is set to 'false' all demo certificate files must be removed, +# otherwise opensearch service doesn't start. 
# For apply mode, demo certificate files are removed based only on their names. For upgrade mode, # public key fingerprints are checked to protect against unintentional deletion (what takes additional time). - name: Remove demo certificate files include_tasks: file: "{{ is_upgrade_run | ternary('remove-known-demo-certs.yml', 'remove-demo-certs.yml') }}" - when: not certificates.files.demo.opendistro_security.allow_unsafe_democertificates + when: not certificates.files.demo.opensearch_security.allow_unsafe_democertificates -- name: Include log4j patch - include_tasks: patch-log4j.yml - -- name: Restart elasticsearch service +- name: Restart OpenSearch service systemd: - name: elasticsearch + name: opensearch state: restarted - register: restart_elasticsearch + enabled: yes + register: restart_opensearch when: change_config.changed - or log4j_patch.changed or change_jvm_config.changed - or install_elasticsearch_package.changed - or (install_opendistro_packages is defined and install_opendistro_packages.changed) - -- name: Enable and start elasticsearch service - systemd: - name: elasticsearch - state: started - enabled: yes - name: Change default users when: not is_upgrade_run block: - - name: Wait for elasticsearch service to start up - when: restart_elasticsearch.changed + - name: Wait for opensearch service to start up + when: restart_opensearch.changed wait_for: port: 9200 host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}" - name: Set helper facts set_fact: - elasticsearch_endpoint: https://{{ ansible_default_ipv4.address }}:9200 + opensearch_endpoint: https://{{ ansible_default_ipv4.address }}:9200 vars: uri_template: &uri client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert.filename }}" @@ -149,7 +138,7 @@ - name: Check if default admin user exists uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/admin" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/admin" method: GET # 404 code is used there as someone can remove admin user on its own. 
status_code: [200, 404] @@ -159,10 +148,10 @@ delay: 1 run_once: true - - name: Set OpenDistro admin password + - name: Set OpenSearch admin password uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/" method: PATCH status_code: [200] body: @@ -184,7 +173,7 @@ - name: Check if default kibanaserver user exists uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/kibanaserver" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/kibanaserver" method: GET status_code: [200] register: kibanaserver_check_response @@ -194,10 +183,10 @@ run_once: true when: specification.kibanaserver_user_active - - name: Set OpenDistro kibanaserver password + - name: Set OpenSearch kibanaserver password uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/" method: PATCH status_code: [200] body: @@ -217,7 +206,7 @@ - name: Check if default logstash user exists uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/logstash" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/logstash" method: GET status_code: [200] register: logstash_check_response @@ -227,10 +216,10 @@ run_once: true when: specification.logstash_user_active - - name: Set OpenDistro logstash password + - name: Set OpenSearch logstash password uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/" method: PATCH status_code: [200] body: @@ -249,10 +238,10 @@ run_once: true when: specification.logstash_user_active - - name: Remove OpenDistro demo users + - name: Remove OpenSearch demo users uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/{{ item }}" + url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/{{ item }}" method: DELETE status_code: [200, 404] register: uri_response diff --git a/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml b/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml new file mode 100644 index 0000000000..113fdd1797 --- /dev/null +++ b/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml @@ -0,0 +1,12 @@ +--- +- name: Set open files limit in sysctl.conf + sysctl: + name: fs.file-max + value: "65536" + state: present + +- name: Set maximum number of memory map areas limit in sysctl.conf + sysctl: + name: vm.max_map_count + value: "262144" + state: present diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml b/ansible/playbooks/roles/opensearch/tasks/generate-certs.yml similarity index 92% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml rename to ansible/playbooks/roles/opensearch/tasks/generate-certs.yml index 898d6cbe35..476604a2f8 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml +++ b/ansible/playbooks/roles/opensearch/tasks/generate-certs.yml @@ -5,18 +5,18 @@ file: state: directory path: "{{ certificates.dirs.ca_key }}" - owner: root - group: elasticsearch - mode: u=rwx,g=rx,o= # elasticsearch.service requires 'rx' for group + owner: "{{ specification.opensearch_os_user }}" + group: opensearch + mode: u=rwx,g=rwx,o= # csr files are kept only for idempotency 
- name: Create directory for CSR files file: state: directory path: "{{ certificates.dirs.csr }}" - owner: root - group: elasticsearch - mode: u=rwx,g=rx,o= # CSR file doesn't contain private key + owner: "{{ specification.opensearch_os_user }}" + group: opensearch + mode: u=rwx,g=rwx,o= # CSR file doesn't contain private key - name: Generate keys and certificates on first node when: inventory_hostname == ansible_play_hosts_all[0] @@ -25,20 +25,17 @@ size: 2048 # based on ODFE docs type: RSA mode: u=rw,go= - owner: root - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" format: pkcs8 community.crypto.openssl_csr: mode: u=rw,g=r,o= - owner: root - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" use_common_name_for_san: false community.crypto.x509_certificate: selfsigned_digest: sha256 ownca_digest: sha256 mode: u=rw,g=r,o= - owner: root - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" block: # --- Generate CA root certificate --- @@ -122,7 +119,7 @@ module_defaults: copy: owner: root - group: elasticsearch + group: opensearch block: - name: Get certificate files from the first host slurp: @@ -171,9 +168,9 @@ format: pkcs8 size: 2048 type: RSA - mode: u=rw,g=r,o= # elasticsearch.service requires 'r' for group - owner: root - group: elasticsearch + mode: u=rw,g=r,o= + owner: "{{ specification.opensearch_os_user }}" + group: opensearch return_content: false register: node_key @@ -199,8 +196,8 @@ subjectAltName: "{{ _dns_list + [ 'IP:' + ansible_default_ipv4.address ] }}" use_common_name_for_san: false mode: u=rw,g=r,o= - owner: root - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" + group: opensearch register: node_csr vars: _unique_hostnames: "{{ [ansible_hostname, ansible_nodename, ansible_fqdn] | unique }}" @@ -217,5 +214,5 @@ ownca_not_after: "{{ certificates.files.node.cert.ownca_not_after }}" ownca_digest: sha256 mode: u=rw,go=r - owner: root - group: elasticsearch + owner: "{{ specification.opensearch_os_user }}" + group: opensearch diff --git a/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml b/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml new file mode 100644 index 0000000000..8cef9b5b70 --- /dev/null +++ b/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml @@ -0,0 +1,65 @@ +--- +- name: Download Opensearch + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ file_name_version.opensearch[ansible_architecture] }}" + +- name: Download PerfTop + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ file_name_version.opensearch_perftop[ansible_architecture] }}" + when: ansible_architecture == "x86_64" # Perftop is not yet supported on ARM (https://github.com/opensearch-project/perftop/issues/26) + +- name: Ensure Opensearch service OS group exists + group: + name: "{{ specification.opensearch_os_group }}" + state: present + +- name: Ensure Opensearch service OS user exists + user: + name: "{{ specification.opensearch_os_user }}" + state: present + shell: /bin/bash + +- name: Ensure directory structure exists + file: + path: "{{ item }}" + state: directory + owner: "{{ specification.opensearch_os_user }}" + group: "{{ specification.opensearch_os_group }}" + mode: u=rwx,go=r + recurse: yes + loop: + - "{{ specification.paths.opensearch_home }}" + - "{{ specification.paths.opensearch_perftop_home }}" + - "{{ specification.paths.opensearch_log_dir }}" + - "{{ 
specification.paths.opensearch_conf_dir }}" + - "{{ specification.paths.opensearch_data }}" + - "{{ specification.paths.opensearch_logs }}" + - "{{ certificates.dirs.certs }}" + +- name: Extract OpenSearch tar file + unarchive: + src: "{{ download_directory }}/{{ file_name_version.opensearch[ansible_architecture] }}" + dest: "{{ specification.paths.opensearch_home }}" + owner: "{{ specification.opensearch_os_user }}" + remote_src: yes + extra_opts: + - --strip-components=1 + +- name: Extract OpenSearch PerfTop tar file + unarchive: + src: "{{ download_directory }}/{{ file_name_version.opensearch_perftop[ansible_architecture] }}" + dest: "{{ specification.paths.opensearch_perftop_home }}" + owner: "{{ specification.opensearch_os_user }}" + remote_src: yes + when: ansible_architecture == "x86_64" # Perftop is not yet supported on ARM (https://github.com/opensearch-project/perftop/issues/26) + +- name: Create opensearch.service unit file + template: + src: roles/opensearch/templates/opensearch.service.j2 + dest: "/etc/systemd/system/opensearch.service" diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml b/ansible/playbooks/roles/opensearch/tasks/main.yml similarity index 75% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml rename to ansible/playbooks/roles/opensearch/tasks/main.yml index 6860c69c17..1f1c8e3843 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml +++ b/ansible/playbooks/roles/opensearch/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Configure OS limits (open files, processes and locked-in-memory address space) pam_limits: - domain: elasticsearch + domain: opensearch limit_type: "{{ item.limit_type }}" limit_item: "{{ item.limit_item }}" value: "{{ item.value }}" @@ -13,9 +13,11 @@ - { limit_type: 'soft', limit_item: 'memlock', value: unlimited } - { limit_type: 'hard', limit_item: 'memlock', value: unlimited } -- include_tasks: install-es.yml +- name: Tune the system settings + include_tasks: configure-sysctl.yml -- include_tasks: install-opendistro.yml +- name: Include installation tasks + include_tasks: install-opensearch.yml - name: Include configuration tasks - include_tasks: configure-es.yml + include_tasks: configure-opensearch.yml diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-demo-certs.yml b/ansible/playbooks/roles/opensearch/tasks/remove-demo-certs.yml similarity index 100% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-demo-certs.yml rename to ansible/playbooks/roles/opensearch/tasks/remove-demo-certs.yml diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-known-demo-certs.yml b/ansible/playbooks/roles/opensearch/tasks/remove-known-demo-certs.yml similarity index 100% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-known-demo-certs.yml rename to ansible/playbooks/roles/opensearch/tasks/remove-known-demo-certs.yml diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2 b/ansible/playbooks/roles/opensearch/templates/jvm.options.j2 similarity index 89% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2 rename to ansible/playbooks/roles/opensearch/templates/jvm.options.j2 index e91e6b6635..def6b9e830 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2 +++ b/ansible/playbooks/roles/opensearch/templates/jvm.options.j2 @@ -51,7 +51,7 @@ 
14-:-XX:InitiatingHeapOccupancyPercent=30 ## JVM temporary directory --Djava.io.tmpdir=${ES_TMPDIR} +-Djava.io.tmpdir=${OPENSEARCH_TMPDIR} ## heap dumps @@ -61,23 +61,23 @@ # specify an alternative path for heap dumps; ensure the directory exists and # has sufficient space --XX:HeapDumpPath=/var/lib/elasticsearch +-XX:HeapDumpPath=/var/lib/opensearch # specify an alternative path for JVM fatal error logs --XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log +-XX:ErrorFile=/var/log/opensearch/hs_err_pid%p.log ## JDK 8 GC logging 8:-XX:+PrintGCDetails 8:-XX:+PrintGCDateStamps 8:-XX:+PrintTenuringDistribution 8:-XX:+PrintGCApplicationStoppedTime -8:-Xloggc:/var/log/elasticsearch/gc.log +8:-Xloggc:/var/log/opensearch/gc.log 8:-XX:+UseGCLogFileRotation 8:-XX:NumberOfGCLogFiles=32 8:-XX:GCLogFileSize=64m # JDK 9+ GC logging -9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m +9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/opensearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m ## OpenDistro Performance Analyzer -Dclk.tck=100 diff --git a/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2 b/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2 new file mode 100644 index 0000000000..a886e79dd1 --- /dev/null +++ b/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2 @@ -0,0 +1,51 @@ +[Unit] +Description=OpenSearch +Wants=network-online.target +After=network-online.target + +[Service] +RuntimeDirectory=opensearch +PrivateTmp=true + +WorkingDirectory={{ specification.paths.opensearch_home }} + +User={{ specification.opensearch_os_user }} +Group={{ specification.opensearch_os_user }} + +ExecStart={{ specification.paths.opensearch_home }}/bin/opensearch -p {{ specification.paths.opensearch_home }}/opensearch.pid -q + +StandardOutput=journal +StandardError=inherit + +# Specifies the maximum file descriptor number that can be opened by this process +LimitNOFILE=65536 + +# Specifies the memory lock settings +LimitMEMLOCK=infinity + +# Specifies the maximum number of processes +LimitNPROC=4096 + +# Specifies the maximum size of virtual memory +LimitAS=infinity + +# Specifies the maximum file size +LimitFSIZE=infinity + +# Disable timeout logic and wait until process is stopped +TimeoutStopSec=0 + +# SIGTERM signal is used to stop the Java process +KillSignal=SIGTERM + +# Send the signal only to the JVM rather than its control group +KillMode=process + +# Java process is never killed +SendSIGKILL=no + +# When a JVM receives a SIGTERM signal it exits with code 143 +SuccessExitStatus=143 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2 b/ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2 similarity index 60% rename from ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2 rename to ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2 index 0214fcc7d0..b8e627d5cb 100644 --- a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2 +++ b/ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2 @@ -1,16 +1,10 @@ #jinja2: lstrip_blocks: True # {{ ansible_managed }} -# ======================== Elasticsearch Configuration ========================= +# ======================== OpenSearch Configuration ========================= # -# NOTE: Elasticsearch comes with reasonable defaults for most settings. 
-# Before you set out to tweak and tune the configuration, make sure you -# understand what are you trying to accomplish and the consequences. +# ------------------- Legacy Clients Compability Flag ------------------------- # -# The primary way of configuring a node is via this file. This template lists -# the most important settings you may want to configure for a production cluster. -# -# Please consult the documentation for further information on configuration options: -# https://www.elastic.co/guide/en/elasticsearch/reference/index.html +compatibility.override_main_response_version: true # # ---------------------------------- Cluster ----------------------------------- # @@ -32,15 +26,15 @@ node.name: {{ ansible_hostname }} # # Path to directory where to store the data (separate multiple locations by comma): # -path.data: {{ specification.paths.data }} +path.data: {{ specification.paths.opensearch_data }} # # Path to directory where the shared storage should be mounted: # -path.repo: {{ specification.paths.repo }} +path.repo: {{ specification.paths.opensearch_repo }} # # Path to log files: # -path.logs: {{ specification.paths.logs }} +path.logs: {{ specification.paths.opensearch_logs }} # # ----------------------------------- Memory ----------------------------------- # @@ -52,7 +46,7 @@ path.logs: {{ specification.paths.logs }} # on the system and that the owner of the process is allowed to use this # limit. # -# Elasticsearch performs poorly when the system is swapping the memory. +# OpenSearch performs poorly when the system is swapping the memory. # # ---------------------------------- Network ----------------------------------- # @@ -109,33 +103,33 @@ cluster.initial_master_nodes: ["{{ ansible_hostname }}"] # #action.destructive_requires_name: true -######## Start OpenDistro for Elasticsearch Security Configuration ######## +######## OpenSearch Security Configuration ######## # WARNING: revise all the lines below before you go into production -opendistro_security.ssl.transport.pemcert_filepath: {{ node_cert_filename.transport }} -opendistro_security.ssl.transport.pemkey_filepath: {{ node_key_filename.transport }} -opendistro_security.ssl.transport.pemtrustedcas_filepath: {{ root_ca_cert_filename.transport }} -opendistro_security.ssl.transport.enforce_hostname_verification: {{ specification.opendistro_security.ssl.transport.enforce_hostname_verification | lower }} -opendistro_security.ssl.http.enabled: true -opendistro_security.ssl.http.pemcert_filepath: {{ node_cert_filename.http }} -opendistro_security.ssl.http.pemkey_filepath: {{ node_key_filename.http }} -opendistro_security.ssl.http.pemtrustedcas_filepath: {{ root_ca_cert_filename.http }} -opendistro_security.allow_unsafe_democertificates: {{ opendistro_security_allow_unsafe_democertificates | lower }} -opendistro_security.allow_default_init_securityindex: true -opendistro_security.authcz.admin_dn: +plugins.security.ssl.transport.pemcert_filepath: "{{ certificates.dirs.certs }}/{{ node_cert_filename.transport }}" +plugins.security.ssl.transport.pemkey_filepath: "{{ certificates.dirs.ca_key }}/{{ node_key_filename.transport }}" +plugins.security.ssl.transport.pemtrustedcas_filepath: "{{ certificates.dirs.certs }}/{{ root_ca_cert_filename.transport }}" +plugins.security.ssl.transport.enforce_hostname_verification: {{ specification.opensearch_security.ssl.transport.enforce_hostname_verification | lower }} +plugins.security.ssl.http.enabled: true +plugins.security.ssl.http.pemcert_filepath: "{{ certificates.dirs.certs }}/{{ 
node_cert_filename.http }}" +plugins.security.ssl.http.pemkey_filepath: "{{ certificates.dirs.ca_key }}/{{ node_key_filename.http }}" +plugins.security.ssl.http.pemtrustedcas_filepath: "{{ certificates.dirs.certs }}/{{ root_ca_cert_filename.http }}" +plugins.security.allow_unsafe_democertificates: {{ opensearch_security_allow_unsafe_democertificates | lower }} +plugins.security.allow_default_init_securityindex: true +plugins.security.authcz.admin_dn: {% for dn in admin_dn %} - '{{ dn }}' {% endfor %} {% if nodes_dn | count > 0 %} -opendistro_security.nodes_dn: +plugins.security.nodes_dn: {% for dn in nodes_dn %} - '{{ dn }}' {% endfor %} {% endif %} -opendistro_security.audit.type: internal_elasticsearch -opendistro_security.enable_snapshot_restore_privilege: true -opendistro_security.check_snapshot_restore_write_privileges: true -opendistro_security.restapi.roles_enabled: ["all_access", "security_rest_api_access"] +plugins.security.audit.type: internal_opensearch +plugins.security.enable_snapshot_restore_privilege: true +plugins.security.check_snapshot_restore_write_privileges: true +plugins.security.restapi.roles_enabled: ["all_access", "security_rest_api_access"] cluster.routing.allocation.disk.threshold_enabled: false node.max_local_storage_nodes: 3 -######## End OpenDistro for Elasticsearch Security Configuration ######## +######## End OpenSearch Security Configuration ######## diff --git a/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml b/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml new file mode 100644 index 0000000000..e282dc07ae --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml @@ -0,0 +1,11 @@ +--- +file_name_version: + opensearch_dashboards: + x86_64: opensearch-dashboards-1.2.0-linux-x64.tar.gz + aarch64: opensearch-dashboards-1.2.0-linux-arm64.tar.gz +opensearch_api_port: 9200 +opensearch_nodes_dashboards: |- + {% for item in groups['opensearch_dashboards'] -%} + https://{{ hostvars[item]['ansible_host'] }}:{{ opensearch_api_port }}{% if not loop.last %}","{% endif %} + {%- endfor %} +java: "{{ es_java | default('java-1.8.0-openjdk.x86_64') }}" diff --git a/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml b/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml new file mode 100644 index 0000000000..09474e767a --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart dashboards + systemd: + name: opensearch-dashboards + state: restarted + enabled: yes diff --git a/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml b/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml new file mode 100644 index 0000000000..1dc5d97b43 --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml @@ -0,0 +1,43 @@ +--- +- name: Download Opensearch dashbaords + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ file_name_version.opensearch_dashboards[ansible_architecture] }}" + +- name: Create OpenSearch Dashboards user + user: + name: "{{ specification.dashboards_os_user }}" + state: present + shell: /bin/bash + +- name: Create home directory + file: + path: "{{ specification.paths.opensearchdash_home }}" + state: directory + owner: "{{ specification.dashboards_os_user }}" + group: "{{ specification.dashboards_os_user }}" + +- name: Extract OpenSearch Dashboards tar file + unarchive: + src: "{{ download_directory }}/{{ 
file_name_version.opensearch_dashboards[ansible_architecture] }}" + dest: "{{ specification.paths.opensearchdash_home }}" + owner: "{{ specification.dashboards_os_user }}" + remote_src: yes + extra_opts: + - --strip-components=1 + +- name: Copy configuration file + template: + src: opensearch_dashboards.yml.j2 + dest: "{{ specification.paths.opensearchdash_conf_dir }}/opensearch_dashboards.yml" + owner: "{{ specification.dashboards_os_user }}" + group: "{{ specification.dashboards_os_user }}" + mode: 0644 + backup: yes + +- name: Create opensearch-dashboards.service unit file + template: + src: opensearch-dashboards.service.j2 + dest: /etc/systemd/system/opensearch-dashboards.service diff --git a/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml b/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml new file mode 100644 index 0000000000..6c1d137b13 --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml @@ -0,0 +1,22 @@ +--- + +- hostname: + name: "{{ inventory_hostname }}" + +- name: Include dashboards installation + include_tasks: dashboards.yml + +- name: Make sure OpenSearch Dashboards is started + service: + name: opensearch-dashboards + state: started + enabled: yes + +- name: Get all the installed dashboards plugins + command: "{{ specification.paths.opensearchdash_plugin_bin_path }} list" + become: false # This command can not be run as root user + register: list_plugins + +- name: Show all the installed dashboards plugins + debug: + msg: "{{ list_plugins.stdout }}" diff --git a/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2 b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2 new file mode 100644 index 0000000000..4613cb3ace --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2 @@ -0,0 +1,48 @@ +[Unit] +Description=OpenSearch Dashboards +Wants=network-online.target +After=network-online.target + +[Service] +RuntimeDirectory=opensearch-dashboards +PrivateTmp=true + +WorkingDirectory={{ specification.paths.opensearchdash_home }} + +User={{ specification.dashboards_os_user }} +Group={{ specification.dashboards_os_user }} + +ExecStart={{ specification.paths.opensearchdash_home }}/bin/opensearch-dashboards -q + +StandardOutput=journal +StandardError=inherit + +# Specifies the maximum file descriptor number that can be opened by this process +LimitNOFILE=65536 + +# Specifies the maximum number of processes +LimitNPROC=4096 + +# Specifies the maximum size of virtual memory +LimitAS=infinity + +# Specifies the maximum file size +LimitFSIZE=infinity + +# Disable timeout logic and wait until process is stopped +TimeoutStopSec=0 + +# SIGTERM signal is used to stop the Java process +KillSignal=SIGTERM + +# Send the signal only to the JVM rather than its control group +KillMode=process + +# Java process is never killed +SendSIGKILL=no + +# When a JVM receives a SIGTERM signal it exits with code 143 +SuccessExitStatus=143 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2 b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2 new file mode 100644 index 0000000000..c3b62436fa --- /dev/null +++ b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2 @@ -0,0 +1,13 @@ +server.port: 5601 +server.host: "{{ ansible_host }}" +opensearch.hosts: ["{{ opensearch_nodes_dashboards 
}}"] +opensearch.ssl.verificationMode: none +opensearch.username: "{{ specification.dashboards_user }}" +opensearch.password: "{{ specification.dashboards_user_password }}" +opensearch.requestHeadersWhitelist: [ authorization,securitytenant ] + +opensearch_security.multitenancy.enabled: true +opensearch_security.multitenancy.tenants.preferred: ["Private", "Global"] +opensearch_security.readonly_mode.roles: ["kibana_read_only"] +# Use this setting if you are running dashboards without https +opensearch_security.cookie.secure: false diff --git a/ansible/playbooks/roles/preflight/defaults/main.yml b/ansible/playbooks/roles/preflight/defaults/main.yml index 860f7db731..2cbb5f877d 100644 --- a/ansible/playbooks/roles/preflight/defaults/main.yml +++ b/ansible/playbooks/roles/preflight/defaults/main.yml @@ -38,8 +38,6 @@ unsupported_roles: - haproxy - logging - elasticsearch_curator - - opendistro_for_elasticsearch - - elasticsearch - kibana - filebeat - prometheus @@ -75,8 +73,6 @@ unsupported_roles: - haproxy - logging - elasticsearch_curator - - opendistro_for_elasticsearch - - elasticsearch - kibana - filebeat - prometheus diff --git a/ansible/playbooks/roles/recovery/defaults/main.yml b/ansible/playbooks/roles/recovery/defaults/main.yml index 88be45c8a6..e105375aa7 100644 --- a/ansible/playbooks/roles/recovery/defaults/main.yml +++ b/ansible/playbooks/roles/recovery/defaults/main.yml @@ -2,5 +2,5 @@ recovery_dir: /epibackup recovery_source_dir: "{{ recovery_dir }}/mounted" recovery_source_host: "{{ groups.repository[0] if (custom_repository_url | default(false)) else (resolved_repository_hostname | default(groups.repository[0])) }}" -elasticsearch_snapshot_repository_name: epiphany -elasticsearch_snapshot_repository_location: /var/lib/elasticsearch-snapshots +opensearch_snapshot_repository_name: epiphany +opensearch_snapshot_repository_location: /var/lib/opensearch-snapshots diff --git a/ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml similarity index 62% rename from ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml index 3792303795..3b50d75ca1 100644 --- a/ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml +++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml @@ -1,8 +1,13 @@ --- +- name: Include vars from opensearch role + include_vars: + file: roles/opensearch/vars/main.yml + name: opensearch_vars + - name: Find snapshot archive import_tasks: common/find_snapshot_archive.yml vars: - snapshot_prefix: "kibana_etc" + snapshot_prefix: "opensearch_conf" snapshot_name: "{{ specification.components.logging.snapshot_name }}" - name: Transfer the archive via rsync @@ -15,24 +20,24 @@ - name: Verify snapshot checksum import_tasks: common/verify_snapshot_checksum.yml -- name: Stop kibana service +- name: Stop OpenSearch service systemd: - name: kibana + name: opensearch state: stopped - name: Clear directories import_tasks: common/clear_directories.yml vars: dirs_to_clear: - - /etc/kibana/ + - "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}" - name: Extract the archive unarchive: - dest: /etc/kibana/ + dest: "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}" src: "{{ recovery_dir }}/{{ snapshot_path | basename }}" remote_src: true -- name: Start kibana service +- name: Start OpenSearch service systemd: - name: kibana + name: opensearch state: started diff --git 
a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml similarity index 59% rename from ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml index 7c81954bf5..fcbfcd0f2e 100644 --- a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml +++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml @@ -1,8 +1,13 @@ --- +- name: Include vars from opensearch role + include_vars: + file: roles/opensearch_dashboards/vars/main.yml + name: opensearch_dashboards_vars + - name: Find snapshot archive import_tasks: common/find_snapshot_archive.yml vars: - snapshot_prefix: "elasticsearch_etc" + snapshot_prefix: "opsd_conf_dir" snapshot_name: "{{ specification.components.logging.snapshot_name }}" - name: Transfer the archive via rsync @@ -15,24 +20,24 @@ - name: Verify snapshot checksum import_tasks: common/verify_snapshot_checksum.yml -- name: Stop elasticsearch service +- name: Stop opensearch-dashboards service systemd: - name: elasticsearch + name: opensearch-dashboards state: stopped - name: Clear directories import_tasks: common/clear_directories.yml vars: dirs_to_clear: - - /etc/elasticsearch/ + - "{{ opensearch_dashboards_vars.specification.paths.opsd_conf_dir }}" - name: Extract the archive unarchive: - dest: /etc/elasticsearch/ + dest: "{{ opensearch_dashboards_vars.specification.paths.opsd_conf_dir }}" src: "{{ recovery_dir }}/{{ snapshot_path | basename }}" remote_src: true -- name: Start elasticsearch service +- name: Start opensearch-dashboards service systemd: - name: elasticsearch + name: opensearch-dashboards state: started diff --git a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml similarity index 67% rename from ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml index f1fa9bf15f..af1ba56789 100644 --- a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml +++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml @@ -1,12 +1,12 @@ --- -- name: Include default vars from opendistro_for_elasticsearch role +- name: Include default vars from opensearch role include_vars: - file: roles/opendistro_for_elasticsearch/defaults/main.yml + file: roles/opensearch/defaults/main.yml name: odfe - name: Set helper facts set_fact: - elasticsearch_endpoint: >- + opensearch_endpoint: >- https://{{ ansible_default_ipv4.address }}:9200 vars: uri_template: &uri @@ -18,7 +18,7 @@ - name: Check cluster health uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_cluster/health" + url: "{{ opensearch_endpoint }}/_cluster/health" method: GET register: uri_response until: uri_response is success @@ -28,7 +28,7 @@ - name: Find snapshot archive import_tasks: common/find_snapshot_archive.yml vars: - snapshot_prefix: "elasticsearch_snapshot" + snapshot_prefix: "opensearch_snapshot" snapshot_name: "{{ specification.components.logging.snapshot_name }}" - name: Transfer the archive via rsync @@ -45,38 +45,38 @@ import_tasks: common/clear_directories.yml vars: dirs_to_clear: - - "{{ elasticsearch_snapshot_repository_location }}/" + - "{{ opensearch_snapshot_repository_location }}/" - name: Extract the archive unarchive: - dest: "{{ 
elasticsearch_snapshot_repository_location }}/" + dest: "{{ opensearch_snapshot_repository_location }}/" src: "{{ recovery_dir }}/{{ snapshot_path | basename }}" remote_src: true - name: Change snapshot directory permissions file: - path: "{{ elasticsearch_snapshot_repository_location }}/" - owner: elasticsearch - group: elasticsearch + path: "{{ opensearch_snapshot_repository_location }}/" + owner: opensearch + group: opensearch recurse: true - name: Reconstruct the snapshot_name set_fact: snapshot_name: >- - {{ snapshot_path | basename | regex_replace('^elasticsearch_snapshot_(.*).tar.gz$', '\1') }} + {{ snapshot_path | basename | regex_replace('^opensearch_snapshot_(.*).tar.gz$', '\1') }} - debug: var=snapshot_name -- name: Ensure all kibana and filebeat instances are stopped, then restore the snapshot +- name: Ensure all OPSD and filebeat instances are stopped, then restore the snapshot block: - - name: Stop all kibana instances + - name: Stop allOpenSearch Dashboards instances delegate_to: "{{ item }}" systemd: - name: kibana + name: opensearch-dashboards state: stopped enabled: false - loop: "{{ groups.kibana | default([]) }}" + loop: "{{ groups.opensearch_dashboards | default([]) }}" - name: Stop all filebeat instances delegate_to: "{{ item }}" @@ -89,29 +89,29 @@ - name: Close all indices uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_all/_close" + url: "{{ opensearch_endpoint }}/_all/_close" method: POST - name: Delete all indices uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_all" + url: "{{ opensearch_endpoint }}/_all" method: DELETE - name: Restore the snapshot uri: <<: *uri - url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}/_restore" + url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}/_restore" method: POST always: - - name: Start all kibana instances + - name: Start all OpenSearch Dashboards instances delegate_to: "{{ item }}" systemd: - name: kibana + name: opensearch-dashboards state: started enabled: true - loop: "{{ groups.kibana | default([]) }}" + loop: "{{ groups.opensearch_dashboards | default([]) }}" - name: Start all filebeat instances delegate_to: "{{ item }}" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml index 27734b80ec..635930e643 100644 --- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml @@ -38,3 +38,13 @@ files: 'https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz': sha256: 5140b4c4473ca33a0af4c3f70545dcc89735c0a179d974ebc150f1f28ac229ab + + # --- OpenSearch Bundle --- + 'https://artifacts.opensearch.org/releases/bundle/opensearch/1.2.4/opensearch-1.2.4-linux-x64.tar.gz': + sha256: d40f2696623b6766aa235997e2847a6c661a226815d4ba173292a219754bd8a8 + + 'https://artifacts.opensearch.org/releases/bundle/opensearch-dashboards/1.2.0/opensearch-dashboards-1.2.0-linux-x64.tar.gz': + sha256: 14623798e61be6913e2a218d6ba3e308e5036359d7bda58482ad2f1340aa3c85 + + 'https://github.com/opensearch-project/perftop/releases/download/1.2.0.0/opensearch-perf-top-1.2.0.0-linux-x64.zip': + sha256: e8f9683976001a8cf59a9f86da5caafa10b88643315f0af2baa93a9354d41e2b diff --git a/ansible/playbooks/roles/upgrade/defaults/main.yml 
b/ansible/playbooks/roles/upgrade/defaults/main.yml index e7e0a5f77a..3f96e23b88 100644 --- a/ansible/playbooks/roles/upgrade/defaults/main.yml +++ b/ansible/playbooks/roles/upgrade/defaults/main.yml @@ -5,7 +5,7 @@ logging: cert_path: /etc/elasticsearch/custom-admin.pem key_path: /etc/elasticsearch/custom-admin-key.pem -opendistro_for_elasticsearch: +opensearch: upgrade_config: custom_admin_certificate: cert_path: /etc/elasticsearch/custom-admin.pem @@ -18,7 +18,7 @@ opendistro_for_elasticsearch: dual_root_ca: filename: demo2epiphany-certs-migration-root-CAs.pem - upgrade_state_file_path: /etc/elasticsearch/epicli-upgrade-started.state + upgrade_state_file_path: /var/lib/epiphany/upgrade/state/opensearch-upgrade.uncompleted kubernetes: upgrade_state_file_path: /var/lib/epiphany/upgrade/state/kubernetes-{{ ver }}.uncompleted diff --git a/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml b/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml index f7731c3218..81af709f8f 100644 --- a/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml +++ b/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml @@ -24,6 +24,6 @@ - name: Update elasticsearch-curator package include_role: name: elasticsearch_curator - tasks_from: install-es-curator-{{ ansible_os_family }} # update only package and do not change configured cron jobs + tasks_from: install-ops-curator-{{ ansible_os_family }} # update only package and do not change configured cron jobs when: - curator_defaults.curator_version is version(ansible_facts.packages['elasticsearch-curator'][0].version, '>') diff --git a/ansible/playbooks/roles/upgrade/tasks/kibana.yml b/ansible/playbooks/roles/upgrade/tasks/kibana.yml deleted file mode 100644 index c8e3baab72..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/kibana.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Kibana | Get information about installed packages as facts - package_facts: - manager: auto - when: ansible_facts.packages is undefined - -# Kibana is upgraded only when there is no 'kibana-oss' package (replaced by 'opendistroforelasticsearch-kibana' since v0.5). -# This condition has been added to not fail when 'epicli upgrade' is run for Epiphany v0.4 cluster. -# We cannot upgrade Kibana to v7 having Elasticsearch v6. 
-- name: Upgrade Kibana - when: ansible_facts.packages['kibana-oss'] is undefined - block: - - name: Kibana | Assert that opendistroforelasticsearch-kibana package is installed - assert: - that: ansible_facts.packages['opendistroforelasticsearch-kibana'] is defined - fail_msg: opendistroforelasticsearch-kibana package not found, nothing to upgrade - quiet: true - - - name: Kibana | Load defaults from kibana role - include_vars: - file: roles/kibana/defaults/main.yml - name: kibana_defaults - - - name: Kibana | Print versions - debug: - msg: - - "Installed version: {{ ansible_facts.packages['opendistroforelasticsearch-kibana'][0].version }}" - - "Target version: {{ kibana_defaults.kibana_version[ansible_os_family] }}" - - - name: Upgrade Kibana - when: - - kibana_defaults.kibana_version[ansible_os_family] - is version(ansible_facts.packages['opendistroforelasticsearch-kibana'][0].version, '>=') - block: - - name: Kibana | Slurp /etc/kibana/kibana.yml - slurp: - src: /etc/kibana/kibana.yml - register: _kibana_config_yml - no_log: true - - - name: Kibana | Upgrade - import_role: - name: kibana - vars: - context: upgrade - existing_es_password: >- - {{ (_kibana_config_yml.content | b64decode | from_yaml)['elasticsearch.password'] }} diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml deleted file mode 100644 index b3f14e4137..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: ODFE | Get information about installed packages as facts - package_facts: - manager: auto - when: ansible_facts.packages is undefined - -- name: ODFE | Assert that elasticsearch-oss package is installed - assert: - that: ansible_facts.packages['elasticsearch-oss'] is defined - fail_msg: elasticsearch-oss package not found, nothing to upgrade - quiet: true - -- name: ODFE | Include defaults from opendistro_for_elasticsearch role - include_vars: - file: roles/opendistro_for_elasticsearch/defaults/main.yml - name: odfe_defaults - -- name: ODFE | Patch log4j - include_role: - name: opendistro_for_elasticsearch - tasks_from: patch-log4j - when: odfe_defaults.log4j_file_name is defined - -- name: Restart elasticsearch service - systemd: - name: elasticsearch - state: restarted - register: restart_elasticsearch - when: odfe_defaults.log4j_file_name is defined and log4j_patch.changed - -- name: ODFE | Print elasticsearch-oss versions - debug: - msg: - - "Installed version: {{ ansible_facts.packages['elasticsearch-oss'][0].version }}" - - "Target version: {{ odfe_defaults.versions[ansible_os_family].elasticsearch_oss }}" - -# If state file exists it means the previous run failed -- name: ODFE | Check if upgrade state file exists - stat: - path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" - get_attributes: false - get_checksum: false - get_mime: false - register: stat_upgrade_state_file - -- name: ODFE | Upgrade Elasticsearch and ODFE plugins (part 1/2) - include_tasks: opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml - when: _target_version is version(ansible_facts.packages['elasticsearch-oss'][0].version, '>') - or (_target_version is version(ansible_facts.packages['elasticsearch-oss'][0].version, '==') - and stat_upgrade_state_file.stat.exists) - vars: - _target_version: "{{ odfe_defaults.versions[ansible_os_family].elasticsearch_oss }}" diff --git 
a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml deleted file mode 100644 index 2b3f304465..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# If state file exists, it means upgrade has been started by the previous play and should be continued -- name: ODFE | Check if upgrade state file exists - stat: - path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" - get_attributes: false - get_checksum: false - get_mime: false - register: stat_upgrade_state_file - -- name: ODFE | Upgrade Elasticsearch and ODFE plugins (part 2/2) - include_tasks: opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml - when: stat_upgrade_state_file.stat.exists diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml deleted file mode 100644 index 806c09a3d0..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -# ================================================================================================= -# Migration from demo certs to generated by Epiphany -# ------------------------------------------------------------------------------------------------- -# A) Parallel part (all nodes at the same time) - THIS FILE -# 1. Assert API access using demo cert (done in pre-migration part) -# 2. Generate Epiphany certs (done in pre-migration part) -# 3. Save cluster status to file (done in pre-migration part) -# 4. Create dual root CA file for the migration (demo + Epiphany root CAs concatenated), needed temporarily -# 5. 
Patch the following properties in existing elasticsearch.yml: -# a) opendistro_security.authcz.admin_dn - add Epiphany admin cert -# b) opendistro_security.nodes_dn - by default not present, add all Epiphany node certs -# c) opendistro_security.ssl.http.pemtrustedcas_filepath - replace demo root CA with the dual root CA file -# d) opendistro_security.ssl.transport.pemtrustedcas_filepath - replace demo root CA with the dual root CA file -# B) Serial part (node by node) - tasks from migrate-from-demo-certs-02.yml - -# Create dual root CA transitional file -- include_tasks: utils/create-dual-cert-file.yml - vars: - certs_to_concatenate: - - "{{ (certificates.dirs.certs, certificates.files.demo.root_ca.cert) | path_join }}" - - "{{ (certificates.dirs.certs, certificates.files.root_ca.cert.filename) | path_join }}" - target_path: "{{ (certificates.dirs.certs, opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename) | path_join }}" - -- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml - slurp: - src: /etc/elasticsearch/elasticsearch.yml - register: _elasticsearch_yml - -- name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to dual root CA) - copy: - dest: /etc/elasticsearch/elasticsearch.yml - content: "{{ _patched_content | to_nice_yaml }}" - mode: u=rw,g=rw,o= - owner: root - group: elasticsearch - backup: true - vars: - _epiphany_subjects: - admin: "{{ certificates.files.admin.cert.subject }}" - node: "{{ certificates.files.node.cert.subject }}" - _epiphany_dn_attributes: - admin: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.admin.keys()) }}" - node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}" - _epiphany_DNs: - admin: >- - {{ _epiphany_dn_attributes.admin | zip(_epiphany_dn_attributes.admin | map('extract', _epiphany_subjects.admin)) - | map('join','=') | join(',') }} - node: >- - {{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node)) - | map('join','=') | join(',') }} - _epiphany_nodes_dn: >- - {%- for node in ansible_play_hosts_all -%} - {%- if loop.first -%}[{%- endif -%} - '{{ _epiphany_DNs.node.split(',') | map('regex_replace', '^CN=.+$', 'CN=' + hostvars[node].ansible_nodename) | join(',') }}' - {%- if not loop.last -%},{%- else -%}]{%- endif -%} - {%- endfor -%} - _old_content: >- - {{ _elasticsearch_yml.content | b64decode | from_yaml }} - _updated_settings: - opendistro_security.authcz.admin_dn: >- - {{ _old_content['opendistro_security.authcz.admin_dn'] | default([]) | map('replace', ', ', ',') - | union([opendistro_for_elasticsearch.certs_migration.demo_DNs.admin] + [_epiphany_DNs.admin]) }} - opendistro_security.nodes_dn: >- - {{ _old_content['opendistro_security.nodes_dn'] | default([]) - | union([opendistro_for_elasticsearch.certs_migration.demo_DNs.node] + _epiphany_nodes_dn) }} - - opendistro_security.ssl.http.pemtrustedcas_filepath: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}" - opendistro_security.ssl.transport.pemtrustedcas_filepath: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}" - _patched_content: >- - {{ _old_content | combine(_updated_settings) }} diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml deleted file mode 100644 index 223f6968df..0000000000 --- 
a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -# ================================================================================================= -# Migration from demo certs to generated by Epiphany -# ------------------------------------------------------------------------------------------------- -# A) Parallel part (all nodes at the same time) - tasks from migrate-from-demo-certs-01.yml -# B) Serial part (node by node) - THIS FILE -# 1. Prepare cluster for a node restart (disable shard allocation) -# 2. Restart all nodes one by one waiting for yellow cluster status after each restart -# 3. Patch elasticsearch.yml to use Epiphany node cert instead of demo (all nodes) -# 4. Restart all nodes one by one waiting for yellow cluster status after each restart -# 5. Re-enable shard allocation -# 6. Wait for green/yellow cluster status -# 7. Test API access using Epiphany admin cert (all nodes) -# 8. Update API related facts to use Epiphany admin cert instead of demo -# 9. Reload config file - -- when: inventory_hostname == ansible_play_hosts_all[0] # run once - block: - # Prepare cluster for a node restart - - include_tasks: utils/prepare-cluster-for-node-restart.yml - - # Restart all nodes (special flow: run once but in loop for each host) - - include_tasks: - file: utils/restart-node.yml - apply: - delegate_to: "{{ target_inventory_hostname }}" - delegate_facts: true - loop: "{{ ansible_play_hosts_all }}" - loop_control: - loop_var: target_inventory_hostname - - # Patch elasticsearch.yml to use Epiphany node cert (all hosts) - - - name: ODFE | Load /etc/elasticsearch/elasticsearch.yml - slurp: - src: /etc/elasticsearch/elasticsearch.yml - register: _elasticsearch_yml - delegate_to: "{{ target_inventory_hostname }}" - loop: "{{ ansible_play_hosts_all }}" - loop_control: - loop_var: target_inventory_hostname - - - name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to Epiphany node certificates) - copy: - dest: /etc/elasticsearch/elasticsearch.yml - content: "{{ _patched_content | to_nice_yaml }}" - mode: u=rw,g=rw,o= - owner: root - group: elasticsearch - backup: true - delegate_to: "{{ target_inventory_hostname }}" - delegate_facts: true - loop: "{{ ansible_play_hosts_all }}" - loop_control: - index_var: loop_index0 - loop_var: target_inventory_hostname - vars: - _node_hostname: "{{ hostvars[target_inventory_hostname].ansible_nodename }}" - _epiphany_node_cert: - cert_filename: "{{ certificates.files.node.cert.filename | replace(ansible_nodename, _node_hostname) }}" - key_filename: "{{ certificates.files.node.key.filename | replace(ansible_nodename, _node_hostname) }}" - _old_content: >- - {{ _elasticsearch_yml.results[loop_index0].content | b64decode | from_yaml }} - _updated_settings: - opendistro_security.ssl.http.pemcert_filepath: "{{ _epiphany_node_cert.cert_filename }}" - opendistro_security.ssl.http.pemkey_filepath: "{{ _epiphany_node_cert.key_filename }}" - opendistro_security.ssl.transport.pemcert_filepath: "{{ _epiphany_node_cert.cert_filename }}" - opendistro_security.ssl.transport.pemkey_filepath: "{{ _epiphany_node_cert.key_filename }}" - _patched_content: >- - {{ _old_content | combine(_updated_settings) }} - - # Restart all nodes (special flow: run once but in loop for each host) - - include_tasks: - file: utils/restart-node.yml - apply: - delegate_to: "{{ target_inventory_hostname }}" - delegate_facts: true - loop: "{{ ansible_play_hosts_all }}" - loop_control: - loop_var: 
target_inventory_hostname - - # Re-enable shard allocation - - include_tasks: utils/enable-shard-allocation.yml - - # Wait for shard allocation (for 'green' status at least 2 nodes must be already upgraded) - - include_tasks: utils/wait-for-shard-allocation.yml - - # Test API access using Epiphany admin cert (all nodes) - - include_tasks: - file: utils/assert-api-access.yml - apply: - delegate_to: "{{ target_inventory_hostname }}" - delegate_facts: true - loop: "{{ ansible_play_hosts_all }}" - loop_control: - loop_var: target_inventory_hostname - vars: - es_api: - cert_type: Epiphany - cert_path: &epi_cert_path "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}" - key_path: &epi_key_path "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}" - url: "{{ hostvars[target_inventory_hostname].es_api.url }}" - fail_msg: API access test failed. - -- name: Update API related facts to use Epiphany admin certificate instead of demo - set_fact: - es_api: "{{ es_api | combine(_es_api) }}" - vars: - _es_api: - cert_type: Epiphany - cert_path: *epi_cert_path - key_path: *epi_key_path - -# Reload config file to preserve patched settings (sets 'existing_config' fact) -- include_tasks: utils/get-config-from-files.yml diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml deleted file mode 100644 index addd327aa3..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml - slurp: - src: /etc/elasticsearch/elasticsearch.yml - register: _elasticsearch_yml - -- name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to generated certificates) - copy: - dest: /etc/elasticsearch/elasticsearch.yml - content: "{{ _patched_content | to_nice_yaml }}" - mode: u=rw,g=rw,o= - owner: root - group: elasticsearch - backup: true - vars: - _epiphany_subjects: - admin: "{{ certificates.files.admin.cert.subject }}" - node: "{{ certificates.files.node.cert.subject }}" - _epiphany_dn_attributes: - admin: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.admin.keys()) }}" - node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}" - _epiphany_DNs: - admin: >- - {{ _epiphany_dn_attributes.admin | zip(_epiphany_dn_attributes.admin | map('extract', _epiphany_subjects.admin)) - | map('join','=') | join(',') }} - node: >- - {{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node)) - | map('join','=') | join(',') }} - _old_content: >- - {{ _elasticsearch_yml.content | b64decode | from_yaml }} - _updated_settings: - opendistro_security.authcz.admin_dn: >- - {{ _old_content['opendistro_security.authcz.admin_dn'] | default([]) | map('replace', ', ', ',') - | union([_epiphany_DNs.admin]) }} - opendistro_security.nodes_dn: >- - {{ _old_content['opendistro_security.nodes_dn'] | default([]) - | union([_epiphany_DNs.node]) }} - - opendistro_security.ssl.http.pemcert_filepath: "{{ certificates.files.node.cert.filename }}" - opendistro_security.ssl.http.pemkey_filepath: "{{ certificates.files.node.key.filename }}" - opendistro_security.ssl.transport.pemcert_filepath: "{{ certificates.files.node.cert.filename }}" - 
opendistro_security.ssl.transport.pemkey_filepath: "{{ certificates.files.node.key.filename }}" - - opendistro_security.ssl.http.pemtrustedcas_filepath: "{{ certificates.files.root_ca.cert.filename }}" - opendistro_security.ssl.transport.pemtrustedcas_filepath: "{{ certificates.files.root_ca.cert.filename }}" - - _patched_content: >- - {{ _old_content | combine(_updated_settings) }} - -- include_tasks: - file: utils/restart-node.yml - vars: - target_inventory_hostname: "{{ inventory_hostname }}" - skip_waiting_for_node: true # because after restart demo certificate stops working - -# Test API access using Epiphany admin cert -- include_tasks: - file: utils/assert-api-access.yml - vars: - es_api: - cert_type: Epiphany - cert_path: &epi_cert_path "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}" - key_path: &epi_key_path "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}" - url: "{{ hostvars[inventory_hostname].es_api.url }}" - fail_msg: API access test failed. - -- name: Update API related facts to use Epiphany admin certificate instead of demo - set_fact: - es_api: "{{ es_api | combine(_es_api) }}" - vars: - _es_api: - cert_type: Epiphany - cert_path: *epi_cert_path - key_path: *epi_key_path - -# Reload config file to preserve patched settings (sets 'existing_config' fact) -- include_tasks: utils/get-config-from-files.yml diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml deleted file mode 100644 index e709502eda..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml +++ /dev/null @@ -1,157 +0,0 @@ ---- -# This file contains only pre-upgrade tasks that can be run in parallel on all hosts - -- name: ODFE | Create upgrade state file - become: true - file: - path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" - state: touch - mode: u=rw,g=r,o= - -- name: ODFE | Ensure elasticsearch service is running - systemd: - name: elasticsearch - enabled: yes - state: started - register: elasticsearch_state - -# Sets 'existing_config' fact -- include_tasks: utils/get-config-from-files.yml - -- name: ODFE | Set common facts - set_fact: - certificates: "{{ odfe_defaults.certificates }}" - es_host: "{{ existing_config.main['network.host'] | default('_local_') }}" - es_http_port: "{{ existing_config.main['http.port'] | default(odfe_defaults.ports.http) }}" - es_transport_port: "{{ existing_config.main['transport.port'] | default(odfe_defaults.ports.transport) }}" - es_clustered: "{{ (existing_config.main['discovery.seed_hosts'] | length > 1) | ternary(True, False) }}" - es_node_name: "{{ existing_config.main['node.name'] }}" - -- name: ODFE | Wait for elasticsearch service to start up - wait_for: - port: "{{ es_transport_port }}" - host: "{{ es_host if (es_host is not regex('^_.+_$')) else '0.0.0.0' }}" # 0.0.0.0 means any IP - when: elasticsearch_state.changed - -# This block requires elasticsearch service to be running -- name: Get host address when special value is used # e.g. 
'_site_' - when: es_host is regex('^_.+_$') - block: - - name: Gather facts on listening ports - community.general.listen_ports_facts: - - - name: Get host address based on transport port - set_fact: - es_host: "{{ ansible_facts.tcp_listen | selectattr('port', '==', es_transport_port|int) - | map(attribute='address') | reject('match', '::') | first }}" - -# NOTE: We need admin certificate for passwordless administrative access to REST API (since we don't know admin's password) - -- include_role: - name: certificate - tasks_from: install-packages # requirements for Ansible certificate modules - -- name: ODFE | Get information on root CA certificate - community.crypto.x509_certificate_info: - # 'pemtrustedcas_filepath' is a relative path - path: "{{ ('/etc/elasticsearch', existing_config.main['opendistro_security.ssl.transport.pemtrustedcas_filepath']) | path_join }}" - register: _root_ca_info - -- name: ODFE | Check if demo or Epiphany certificates are in use # self-signed - set_fact: - _is_demo_cert_in_use: "{{ 'True' if _root_ca_info.subject.commonName == 'Example Com Inc. Root CA' else 'False' }}" - _is_epiphany_cert_in_use: "{{ 'True' if _root_ca_info.subject.commonName == 'Epiphany Managed ODFE Root CA' else 'False' }}" - -# For custom admin cert (non-demo and non-Epiphany), we use workaround (upgrade_config.custom_admin_certificate). -# The workaround should be replaced after implementing task #2127. -- name: ODFE | Set API access facts - set_fact: - es_api: - cert_path: "{{ _cert_path[_cert_type] }}" - cert_type: "{{ _cert_type }}" - key_path: "{{ _key_path[_cert_type] }}" - url: https://{{ es_host }}:{{ es_http_port }} - vars: - _cert_type: >- - {{ 'demo' if (_is_demo_cert_in_use) else - 'Epiphany' if (_is_epiphany_cert_in_use) else - 'custom' }} - _cert_path: - custom: "{{ lookup('vars', current_group_name).upgrade_config.custom_admin_certificate.cert_path }}" # defaults are not available via hostvars - demo: "{{ (certificates.dirs.certs, certificates.files.demo.admin.cert) | path_join }}" - Epiphany: "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}" - _key_path: - custom: "{{ lookup('vars', current_group_name).upgrade_config.custom_admin_certificate.key_path }}" - demo: "{{ (certificates.dirs.certs, certificates.files.demo.admin.key) | path_join }}" - Epiphany: "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}" - -- include_tasks: utils/assert-cert-files-exist.yml - -# ================================================================================================= -# FLOW -# ------------------------------------------------------------------------------------------------- -# NOTE: For clustered nodes it's recommended to disable shard allocation for the cluster before restarting a node (https://www.elastic.co/guide/en/elasticsearch/reference/current/restart-cluster.html#restart-cluster-rolling) -# -# if cert_type == 'demo': -# Test API access -# Genereate Epiphany self-signed certs -# Save cluster status to file -# Run certificates migration procedure for all nodes when 'es_clustered is true' -# // Subtasks of the migration procedure: -# Test API access -# Update API related facts to use Epiphany admin certificate instead of demo -# if cert_type == 'Epiphany': -# Genereate Epiphany self-signed certs - to re-new certs if expiration date differs -# Test API access -# Save cluster status to file -# if cert_type == 'custom': -# Test API access -# Save cluster status to file -# Run upgrade (removes known demo 
certificate files) -# if cert_type == 'Epiphany': -# Remove dual root CA file (created as part of the migration, needed until all nodes are upgraded) -# ================================================================================================= - -# Test API access (demo or custom certs) -- include_tasks: utils/assert-api-access.yml - when: es_api.cert_type in ['demo', 'custom'] - vars: - _fail_msg: - common: Test of accessing API with TLS authentication failed. - custom: >- - It looks like you use custom certificates. - Please refer to 'Open Distro for Elasticsearch upgrade' section of How-To docs. - demo: >- - It looks like you use demo certificates but your configuration might be incorrect or unsupported. - fail_msg: "{{ _fail_msg.common }} {{ _fail_msg[es_api.cert_type] }}" - -- name: Generate self-signed certificates - include_role: - name: opendistro_for_elasticsearch - tasks_from: generate-certs - when: es_api.cert_type != 'custom' - -# Test API access (Epiphany certs) -- include_tasks: utils/assert-api-access.yml - when: es_api.cert_type == 'Epiphany' - vars: - fail_msg: >- - Test of accessing API with TLS authentication failed. - It looks like you use certificates generated by Epiphany but your configuration might be incorrect or an unexpected error occurred. - -# Save cluster health status before upgrade to file -- include_tasks: utils/save-initial-cluster-status.yml - -# Run migration procedure - the first (parallel) part for clustered installation -- include_tasks: migrate-from-demo-certs-01.yml - when: - - es_api.cert_type == 'demo' - - es_clustered # rolling upgrade only for clustered installation - -# Run migration procedure for non-clustered installation -- include_tasks: migrate-from-demo-certs-non-clustered.yml - when: - - es_api.cert_type == 'demo' - - not es_clustered - -# Next tasks are run in serial mode in the next play diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml deleted file mode 100644 index 237f34d4d2..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -# This file contains flow that cannot be run in parallel on multiple hosts because of rolling upgrades. -# It's run after upgrade-elasticsearch-01.yml so some facts are already set. 
- -# Run migration procedure - the second (serial) part -- include_tasks: opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml - when: - - es_api.cert_type == 'demo' - - es_clustered # rolling upgrade only for clustered installation - -- name: ODFE | Print API facts - debug: - var: es_api - tags: [ never, debug ] # only runs when debug or never tag requested - -- name: ODFE | Prepare cluster for rolling upgrade - include_tasks: opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml - when: es_clustered - -- name: ODFE | Stop elasticsearch service - systemd: - name: elasticsearch - state: stopped - -- name: ODFE | Include Elasticsearch installation tasks - include_role: - name: opendistro_for_elasticsearch - tasks_from: install-es.yml - -- name: ODFE | Include Elasticsearch configuration tasks - include_role: - name: opendistro_for_elasticsearch - tasks_from: configure-es.yml - vars: - _old: "{{ existing_config.main }}" - # Keep the same data structure as for apply mode - specification: - jvm_options: "{{ existing_config.jvm_options }}" - cluster_name: "{{ _old['cluster.name'] }}" - clustered: "{{ 'True' if _old['discovery.seed_hosts'] | length > 1 else 'False' }}" - paths: - data: "{{ _old['path.data'] }}" - repo: "{{ _old['path.repo'] | default('/var/lib/elasticsearch-snapshots') }}" # absent in Epiphany v0.6 thus we use default - logs: "{{ _old['path.logs'] }}" - opendistro_security: - ssl: - transport: - enforce_hostname_verification: "{{ _old['opendistro_security.ssl.transport.enforce_hostname_verification'] }}" - - _demo_DNs: - admin: "{{ opendistro_for_elasticsearch.certs_migration.demo_DNs.admin }}" - node: "{{ opendistro_for_elasticsearch.certs_migration.demo_DNs.node }}" - _dual_root_ca_filename: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}" - _epiphany_root_ca_filename: "{{ certificates.files.root_ca.cert.filename }}" - _updated_existing_config: - opendistro_security.authcz.admin_dn: "{{ _old['opendistro_security.authcz.admin_dn'] | reject('search', _demo_DNs.admin) }}" - opendistro_security.nodes_dn: "{{ _old['opendistro_security.nodes_dn'] | default([]) | reject('search', _demo_DNs.node) }}" - opendistro_security.ssl.http.pemtrustedcas_filepath: >- - {{ _old['opendistro_security.ssl.http.pemtrustedcas_filepath'] | replace(_dual_root_ca_filename, _epiphany_root_ca_filename) }} - opendistro_security.ssl.transport.pemtrustedcas_filepath: >- - {{ _old['opendistro_security.ssl.transport.pemtrustedcas_filepath'] | replace(_dual_root_ca_filename, _epiphany_root_ca_filename) }} - - http.port: "{{ _old['http.port'] | default(odfe_defaults.ports.http) }}" - transport.port: "{{ _old['transport.port'] | default(odfe_defaults.ports.transport) }}" - - existing_es_config: "{{ _old | combine(_updated_existing_config) }}" - -- name: ODFE | Include upgrade plugins tasks - include_tasks: opendistro_for_elasticsearch/upgrade-plugins.yml - -# Restart elasticsearch service (unconditionally to ensure this task is not skipped in case of rerunning after interruption) -- include_tasks: opendistro_for_elasticsearch/utils/restart-node.yml - vars: - daemon_reload: true # opendistro-performance-analyzer provides opendistro-performance-analyzer.service - target_inventory_hostname: "{{ inventory_hostname }}" - -# Post-upgrade tasks - -- name: Re-enable shard allocation - when: es_clustered - block: - - include_tasks: opendistro_for_elasticsearch/utils/enable-shard-allocation.yml - - - include_tasks: 
opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml - -# Read cluster health status from before the upgrade -- name: Load upgrade state file - slurp: - src: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" - register: slurp_upgrade_state_file - -# Verify cluster status -- include_tasks: opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml - when: not es_clustered or - (es_clustered and inventory_hostname == ansible_play_hosts_all[-1]) # for 'green' status at least 2 nodes must be already upgraded - vars: - initial_status: "{{ (slurp_upgrade_state_file.content | b64decode | from_json)['status'] }}" - expected_status: "{{ [ initial_status, 'green'] | unique }}" - -- name: ODFE | Remove dual root CA temporary file - file: - path: "{{ (certificates.dirs.certs, opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename) | path_join }}" - state: absent - when: es_api.cert_type == 'Epiphany' - -- name: ODFE | Remove upgrade state file - file: - path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" - state: absent diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml deleted file mode 100644 index 80e34e6382..0000000000 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: ODFE plugins | Assert that opendistro-* packages are installed - assert: - that: ansible_facts.packages['{{ item }}'] is defined - fail_msg: "Missing package to upgrade: {{ item }}" - quiet: true - loop: - - opendistro-alerting - - opendistro-index-management - - opendistro-job-scheduler - - opendistro-performance-analyzer - - opendistro-security - - opendistro-sql - -- name: ODFE plugins | Upgrade opendistro-* packages - include_role: - name: opendistro_for_elasticsearch - tasks_from: install-opendistro.yml diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch.yml new file mode 100644 index 0000000000..6ef6d2e430 --- /dev/null +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch.yml @@ -0,0 +1,70 @@ +--- +- name: OpenSearch | Get information about installed packages as facts + package_facts: + manager: auto + when: ansible_facts.packages is undefined + +- name: OpenSearch | Assert that elasticsearch-oss package is installed + assert: + that: ansible_facts.packages['elasticsearch-oss'] is defined + fail_msg: elasticsearch-oss package not found, nothing to upgrade + quiet: true + +- name: OpenSearch | Include defaults from OpenSearch role + include_vars: + file: roles/opensearch/defaults/main.yml + name: opensearch_defaults + +- name: OpenSearch | Include vars from opensearch role # requires epicli upgrade -f .yml + include_vars: + file: roles/opensearch/vars/main.yml + name: opensearch_vars + +- name: OpenSearch | Ensure Opensearch service user exists + user: + name: "{{ opensearch_vars.specification.opensearch_os_user }}" + state: present + shell: /bin/bash + +- name: OpenSearch | Ensure directory structure exists + file: + path: "{{ item }}" + state: directory + owner: "{{ opensearch_vars.specification.opensearch_os_user }}" + group: "{{ opensearch_vars.specification.opensearch_os_user }}" + mode: u=rw,go=r + recurse: yes + with_items: + - "{{ opensearch_vars.specification.paths.opensearch_home }}" + - "{{ opensearch_vars.specification.paths.opensearch_log_dir }}" + - "{{ 
opensearch_vars.specification.paths.opensearch_conf_dir }}" + - "{{ opensearch_defaults.certificates.dirs.certs }}" + +- name: OpenSearch | Print Elasticsearch and OpenSearch versions + debug: + msg: + - "Installed version: {{ ansible_facts.packages['elasticsearch-oss'][0].version }}" + - "Target version: {{ opensearch_defaults.file_name_version.opensearch[ansible_architecture].split('-')[1] }}" + +# If state file exists it means the previous run failed +- name: OpenSearch | Check if upgrade state file exists + stat: + path: "{{ opensearch.upgrade_state_file_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: stat_upgrade_state_file + +- include_role: + name: upgrade + tasks_from: opensearch/migrate-odfe + when: opensearch_vars.specification.odfe_migration + vars: + current_group_name: logging + +- include_role: + name: upgrade + tasks_from: opensearch/migrate-kibana + when: opensearch_vars.specification.odfe_migration + vars: + current_group_name: logging diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml new file mode 100644 index 0000000000..6cd7d0e16f --- /dev/null +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml @@ -0,0 +1,98 @@ +--- +- name: Kibana migration | Load defaults from Opensearch Dashboards role + include_vars: + file: roles/opensearch_dashboards/defaults/main.yml + +- name: Kibana migration | Load vars from Opensearch Dashboards role # requires epicli upgrade -f .yml + include_vars: + file: roles/opensearch_dashboards/vars/main.yml + +- name: Kibana migration | Stop Kibana service + systemd: + name: kibana + enabled: no + state: stopped + +- name: Kibana migration | Download Opensearch Dashboards binary + include_role: + name: download + tasks_from: download_file + vars: + file_name: "{{ file_name_version.opensearch_dashboards[ansible_architecture] }}" + +- name: Kibana migration | Create opensearch-dashboards user + user: + name: "{{ specification.dashboards_os_user }}" + password: "{{ specification.dashboards_os_user_password }}" + state: present + shell: /bin/bash + +- name: Kibana migration | Create OPSD directories + file: + path: "{{ item }}" + state: directory + owner: "{{ specification.dashboards_os_user }}" + group: "{{ specification.dashboards_os_user }}" + mode: ug=rwx,o=rx + with_items: + - "{{ specification.paths.opensearchdash_log_dir }}" + - "{{ specification.paths.opensearchdash_home }}" + +- name: Kibana migration | Extract the tar file + unarchive: + src: "{{ download_directory }}/{{ file_name_version.opensearch_dashboards[ansible_architecture] }}" + dest: "{{ specification.paths.opensearchdash_home }}" + owner: "{{ specification.dashboards_os_user }}" + remote_src: yes + extra_opts: + - --strip-components=1 + +- name: Kibana migration | Clone kibana settings + copy: + src: /etc/kibana/kibana.yml + dest: "{{ specification.paths.opensearchdash_conf_dir }}/opensearch_dashboards.yml" + remote_src: yes + owner: "{{ specification.dashboards_os_user }}" + group: root + mode: ug=rw,o= + backup: yes + +- name: Kibana migration | Porting kibana settings to OpenSearch Dashboards + replace: + path: "{{ specification.paths.opensearchdash_conf_dir }}/opensearch_dashboards.yml" + regexp: "{{ item.1 }}" + replace: "{{ item.2 }}" + with_items: + - { 1: 'elasticsearch', 2: 'opensearch' } + - { 1: '/kibana', 2: '/opensearchdashboards' } + - { 1: 'opendistro_security', 2: 'opensearch_security' } +# OPS claims to not 
recognize the following 3 Kibana variables + - { 1: 'newsfeed.enabled', 2: '#newsfeed.enabled' } + - { 1: 'telemetry.optIn', 2: '#telemetry.optIn' } + - { 1: 'telemetry.enabled', 2: '#telemetry.enabled' } + +- name: Kibana migration | Create OpenSearch Dashboards service + template: + src: roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2 + dest: /etc/systemd/system/opensearch-dashboards.service + +- name: Kibana migration | Ensure Opensearch Dashboards service is started + service: + name: opensearch-dashboards + state: started + enabled: yes + +- name: Kibana migration | Get all the installed dashboards plugins + command: "{{ specification.paths.opensearchdash_plugin_bin_path }} list" + become: false # This command cannot be run as root user + register: list_plugins + +- name: Kibana migration | Show all the installed dashboards plugins + debug: + msg: "{{ list_plugins.stdout }}" + +- name: Kibana migration | Prevent Filebeat API access problem # Workaround for https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656 + replace: + path: /etc/filebeat/filebeat.yml + regexp: 'setup.dashboards.enabled: true' + replace: 'setup.dashboards.enabled: false' diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml new file mode 100644 index 0000000000..a50234e4bf --- /dev/null +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml @@ -0,0 +1,97 @@ +--- +# Below tasks need to be run in serial +- name: ODFE migration | Stop elasticsearch service + systemd: + name: elasticsearch + enabled: no + state: stopped + register: elasticsearch_state + +- name: ODFE migration | Include defaults for Opensearch binaries installation + include_vars: + file: roles/opensearch/defaults/main.yml + +- name: ODFE migration | Include vars for Opensearch binaries installation + include_vars: + file: roles/opensearch/vars/main.yml + +- name: ODFE migration | Install Opensearch binaries + include_tasks: roles/opensearch/tasks/install-opensearch.yml + +- name: ODFE migration | Copy ES directories to OPS directories + copy: + src: "{{ item.1 }}" + dest: "{{ item.2 }}" + remote_src: yes + owner: "{{ opensearch_vars.specification.opensearch_os_user }}" + group: root + mode: ug=rwx,o= + directory_mode: yes + with_items: + - { 1: "/var/lib/elasticsearch-snapshots/", 2: "{{ specification.paths.opensearch_repo }}/" } + - { 1: "/var/lib/elasticsearch", 2: "{{ specification.paths.opensearch_data }}" } + +- name: ODFE migration | Prepare a list of ES certs and keys + find: + paths: "/etc/elasticsearch/" + patterns: "*pem" + register: pem_files + +- name: ODFE migration | Copy a list of certs and keys to OPS directories + copy: + src: "{{ item.path }}" + dest: "{{ specification.paths.opensearch_conf_dir }}/" + remote_src: yes + with_items: "{{ pem_files.files }}" + +- name: ODFE migration | Clone JVM configuration file + copy: + src: /etc/elasticsearch/jvm.options + dest: "{{ specification.paths.opensearch_conf_dir }}/jvm.options" + remote_src: yes + owner: root + group: opensearch + mode: ug=rw,o= + backup: yes + +- name: ODFE migration | Update JVM configuration file + replace: + path: "{{ specification.paths.opensearch_conf_dir }}/jvm.options" + regexp: "{{ item.1 }}" + replace: "{{ item.2 }}" + with_items: + - { 1: 'elasticsearch', 2: 'opensearch' } + - { 1: '\${ES_TMPDIR}', 2: '${OPENSEARCH_TMPDIR}' } + +- name: ODFE migration | Clone main configuration file + copy: + 
src: /etc/elasticsearch/elasticsearch.yml + dest: "{{ specification.paths.opensearch_conf_dir }}/opensearch.yml" + remote_src: yes + owner: root + group: opensearch + mode: ug=rw,o= + backup: yes + +- name: ODFE migration | Update main configuration file + replace: + path: "{{ specification.paths.opensearch_conf_dir }}/opensearch.yml" + regexp: "{{ item.1 }}" + replace: "{{ item.2 }}" + with_items: + - { 1: 'elasticsearch', 2: 'opensearch' } + - { 1: 'EpiphanyElastic', 2: 'EpiphanyOpensearch' } + - { 1: 'opendistro_security.', 2: 'plugins.security.' } + +- name: ODFE migration | Start OpenSearch service + systemd: + name: opensearch + state: started + enabled: yes + register: restart_opensearch + +- name: ODFE migration | Wait for OpenSearch to start up + wait_for: + port: 9200 + host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}" + sleep: 6 diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml new file mode 100644 index 0000000000..144747b32c --- /dev/null +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml @@ -0,0 +1,224 @@ +--- +- name: OpenSearch | Get information about installed packages as facts + package_facts: + manager: auto + when: ansible_facts.packages is undefined + +- name: OpenSearch | Print Elasticsearch and OpenSearch versions + debug: + msg: + - "Elasticsearch version currently installed: {{ ansible_facts.packages['elasticsearch-oss'][0].version }}" + - "OpenSearch version to be installed: {{ opensearch_defaults.file_name_version.opensearch[ansible_architecture].split('-')[1] }}" + +- name: ODFE migration | Ensure elasticsearch cluster is up and running + block: + - name: OpenSearch | Include vars from opensearch role # requires epicli upgrade -f .yml + include_vars: + file: roles/opensearch/vars/main.yml + name: opensearch_vars + + - name: ODFE migration | Ensure elasticsearch cluster is up and running + systemd: + name: elasticsearch + enabled: yes + state: restarted + register: elasticsearch_state + rescue: + - name: ODFE migration | Suggest potential problem solution and fail + fail: + msg: "Are you trying to migrate from ODFE ( opensearch_vars.specification.odfe_migration: true ) on an already migrated server?" 
+ when: opensearch_vars.specification.odfe_migration + +- name: ODFE migration | Set existing_config facts + include_tasks: opensearch/utils/get-config-from-files.yml + +- name: ODFE migration | Set common facts + set_fact: + certificates: "{{ opensearch_defaults.certificates }}" + es_host: "{{ existing_config.main['network.host'] | default('_local_') }}" + es_http_port: "{{ existing_config.main['http.port'] | default(opensearch_defaults.ports.http) }}" + es_transport_port: "{{ existing_config.main['transport.port'] | default(opensearch_defaults.ports.transport) }}" + es_clustered: "{{ (existing_config.main['discovery.seed_hosts'] | length > 1) | ternary(True, False) }}" + es_node_name: "{{ existing_config.main['node.name'] }}" + +- name: ODFE migration | Assure Elasticsearch files location will be used in following tasks + set_fact: + certificates: + dirs: + certs: "/etc/elasticsearch" + ca_key: "/etc/elasticsearch/private" + csr: "/etc/elasticsearch/csr" + files: + admin: + cert: "epiphany-admin.pem" + key: "epiphany-admin-key.pem" + +- name: ODFE migration | Prepare for ODFE to OPS migration + include_tasks: + file: opensearch/utils/prepare-cluster-for-node-restart.yml + apply: + delegate_to: "{{ target_inventory_hostname }}" + delegate_facts: true + loop: "{{ groups.logging | default([]) }}" + loop_control: + loop_var: target_inventory_hostname + vars: + es_api: + cert_type: Epiphany + cert_path: "{{ (certificates.dirs.certs, certificates.files.admin.cert) | path_join }}" + key_path: "{{ (certificates.dirs.certs, certificates.files.admin.key) | path_join }}" + url: https://{{ es_host }}:{{ es_http_port }} + fail_msg: API access test failed. + +- name: ODFE migration | Run core migration tasks individually on each node + include_tasks: + file: opensearch/migrate-odfe-serial.yml + apply: + delegate_to: "{{ target_hostname }}" + delegate_facts: true + loop: "{{ groups.logging | default([]) }}" + loop_control: + loop_var: target_hostname + run_once: true + +- name: ODFE migration | Check if default admin user exists + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers/admin" + method: GET + # 404 code is used there as someone can remove admin user on its own. 
+ status_code: [200, 404] + validate_certs: no + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + register: admin_check_response + until: admin_check_response is success + retries: 60 + delay: 1 + run_once: true + +- name: ODFE migration | Set Opensearch admin password + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers" + method: PATCH + status_code: [200] + body: + - op: "replace" + path: "/admin" + value: + password: "{{ specification.admin_password }}" + reserved: "true" + backend_roles: + - "admin" + description: "Admin user" + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + body_format: json + validate_certs: no + register: uri_response + until: uri_response is success + retries: 5 + delay: 1 + run_once: true + when: admin_check_response.status == 200 + +- name: ODFE migration | Check if kibanaserver user exists + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers/kibanaserver" + method: GET + # 404 code is used there as someone can remove admin user on its own. + status_code: [200, 404] + validate_certs: no + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + register: kibanaserver_check_response + until: kibanaserver_check_response is success + retries: 60 + delay: 1 + run_once: true + +- name: ODFE migration | Set kibanaserver user password + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers" + method: PATCH + status_code: [200] + body: + - op: "replace" + path: "/kibanaserver" + value: + password: "{{ specification.kibanaserver_password }}" + reserved: "true" + description: "kibanaserver user" + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + body_format: json + validate_certs: no + register: uri_response + until: uri_response is success + retries: 5 + delay: 1 + run_once: true + when: kibanaserver_check_response.status == 200 + +- name: ODFE migration | Check if logstash user exists + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers/logstash" + method: GET + # 404 code is used there as someone can remove admin user on its own. 
+ status_code: [200, 404] + validate_certs: no + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + register: logstash_check_response + until: logstash_check_response is success + retries: 60 + delay: 1 + run_once: true + +- name: ODFE migration | Set logstash user password + uri: + url: "https://{{ inventory_hostname }}:{{ ports.http }}/_opendistro/_security/api/internalusers" + method: PATCH + status_code: [200] + body: + - op: "replace" + path: "/logstash" + value: + password: "{{ specification.logstash_password }}" + reserved: "true" + description: "logstash user" + client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert }}" + client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key }}" + body_format: json + validate_certs: no + register: uri_response + until: uri_response is success + retries: 5 + delay: 1 + run_once: true + when: logstash_check_response.status == 200 + +- name: ODFE migration | Check the OpenSearch status + command: curl https://{{ inventory_hostname }}:{{ ports.http }}/_cluster/health?pretty -u 'admin:{{ specification.admin_password }}' -k + register: opensearch_status + +- name: ODFE migration | Show the OpenSearch status + debug: + msg: "{{ opensearch_status.stdout }}" + failed_when: "'number_of_nodes' not in opensearch_status.stdout" + +- name: ODFE migration | Reenable shard allocation for the cluster + include_tasks: + file: opensearch/utils/enable-shard-allocation.yml + apply: + delegate_to: "{{ target_inventory_hostname }}" + delegate_facts: true + loop: "{{ ansible_play_hosts_all }}" + loop_control: + loop_var: target_inventory_hostname + vars: + es_api: + cert_type: Epiphany + cert_path: "{{ (certificates.dirs.certs, certificates.files.admin.cert) | path_join }}" + key_path: "{{ (certificates.dirs.certs, certificates.files.admin.key) | path_join }}" + url: https://{{ es_host }}:{{ es_http_port }} + fail_msg: API access test failed. 
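As a side note, the status check above shells out to `curl -k`. A roughly equivalent check can be written with Ansible's `uri` module; the sketch below is illustrative only and is not part of the migration tasks in this file (port `9200` and `specification.admin_password` are simply reused from the tasks above):

```yaml
# Illustrative sketch only: the same cluster-health check as the curl task above,
# expressed with the uri module instead of a shell command.
- name: ODFE migration | Check the OpenSearch status via uri (example)
  uri:
    url: "https://{{ inventory_hostname }}:9200/_cluster/health"
    method: GET
    user: admin
    password: "{{ specification.admin_password }}"
    force_basic_auth: true
    validate_certs: false  # mirrors 'curl -k' for self-signed certificates
    return_content: true
  register: opensearch_health
  run_once: true

- name: ODFE migration | Assert that the cluster responded (example)
  assert:
    that: opensearch_health.json.number_of_nodes | int > 0
    fail_msg: Cluster health endpoint returned no nodes.
  run_once: true
```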
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml similarity index 85% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml index b9d36e1d9f..c99c75ad72 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml @@ -1,5 +1,5 @@ --- -- name: ODFE | Assert input parameters +- name: OpenSearch | Assert input parameters assert: that: - es_api.cert_path is defined @@ -13,7 +13,7 @@ # Sets 'test_api_access' - include_tasks: test-api-access.yml -- name: ODFE | Assert API access +- name: OpenSearch | Assert API access assert: that: test_api_access.status == 200 fail_msg: diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml similarity index 89% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml index a4ad4f4f60..8166ad52af 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml @@ -1,5 +1,5 @@ --- -- name: ODFE | Assert input parameters +- name: OpenSearch | Assert input parameters assert: that: - es_api.cert_path is defined @@ -8,7 +8,7 @@ - es_api.key_path is defined quiet: true -- name: ODFE | Get info on files +- name: OpenSearch | Get info on files stat: path: "{{ item }}" get_attributes: false @@ -20,7 +20,7 @@ - "{{ es_api.key_path }}" # Specific case for custom certificates (we don't know the paths so they have to be specified manually) -- name: ODFE | Assert files exist +- name: OpenSearch | Assert files exist assert: that: stat_result.stat.exists fail_msg: "{{ _custom_cert_fail_msg if (es_api.cert_type == 'custom') else _common_fail_msg }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml similarity index 68% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml index 01946b94f6..316078d694 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml @@ -3,16 +3,16 @@ # - certs_to_concatenate # - target_path -- name: ODFE | Read certificates to concatenate +- name: OpenSearch | Read certificates to concatenate slurp: src: "{{ item }}" register: _files loop: "{{ certs_to_concatenate }}" -- name: ODFE | Create dual root CA transitional file for migration +- name: OpenSearch | Create dual root CA transitional file for migration copy: dest: "{{ target_path }}" content: "{{ _files.results | map(attribute='content') | map('b64decode') | join('') }}" mode: u=rw,g=r,o= owner: root - group: elasticsearch + group: opensearch diff 
--git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml similarity index 88% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml index 8394d69fa2..4978f10a5a 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml @@ -4,7 +4,7 @@ # - es_api.cert_path # - es_api.key_path -- name: ODFE | Enable shard allocation for the cluster +- name: OpenSearch | Enable shard allocation for the cluster uri: url: "{{ es_api.url }}/_cluster/settings" method: PUT diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml similarity index 89% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml index 9c0079f468..fae3164ded 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml @@ -4,7 +4,7 @@ # - es_api.cert_path # - es_api.key_path -- name: ODFE | Get cluster health +- name: OpenSearch | Get cluster health uri: url: "{{ es_api.url }}/_cluster/health" method: GET diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml similarity index 69% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml index 814087368c..8678908038 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml @@ -1,17 +1,17 @@ --- # Sets facts on existing configuration -- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml +- name: OpenSearch | Load /etc/elasticsearch/elasticsearch.yml slurp: src: /etc/elasticsearch/elasticsearch.yml register: _elasticsearch_yml -- name: ODFE | Get Xmx value from /etc/elasticsearch/jvm.options +- name: OpenSearch | Get Xmx value from /etc/elasticsearch/jvm.options command: grep -oP '(?<=^-Xmx)\d+[kKmMgG]?' 
/etc/elasticsearch/jvm.options register: _grep_xmx changed_when: false -- name: ODFE | Set existing configuration facts +- name: OpenSearch | Set existing configuration facts set_fact: existing_config: main: "{{ _elasticsearch_yml.content | b64decode | from_yaml }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml similarity index 91% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml index 34bebc59cb..9a0c6ff977 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml @@ -16,7 +16,7 @@ body_format: json block: # It's safe to run this task many times regardless of the state - - name: ODFE | Disable shard allocation for the cluster + - name: OpenSearch | Disable shard allocation for the cluster uri: url: "{{ es_api.url }}/_cluster/settings" method: PUT @@ -35,7 +35,7 @@ # In epicli 0.7.x there is ES 7.3.2 but this step is optional. - name: Handle flush failure block: - - name: ODFE | Perform a synced flush (optional step) + - name: OpenSearch | Perform a synced flush (optional step) uri: url: "{{ es_api.url }}/_flush" method: POST @@ -46,7 +46,7 @@ retries: 120 delay: 1 rescue: - - name: ODFE | Print warning + - name: OpenSearch | Print warning debug: msg: - "WARNING: flush command failed" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml similarity index 74% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml index c6348f7ee9..ee5c496756 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml @@ -10,18 +10,18 @@ # - daemon_reload # - skip_waiting_for_status -- name: ODFE | Restart elasticsearch service +- name: OpenSearch | Restart elasticsearch service systemd: - name: elasticsearch + name: opensearch state: restarted daemon_reload: "{{ daemon_reload | default(omit) }}" -- name: ODFE | Wait for Elasticsearch transport port to become available +- name: OpenSearch | Wait for Elasticsearch transport port to become available wait_for: port: "{{ es_transport_port }}" host: "{{ hostvars[target_inventory_hostname].es_host }}" -- name: ODFE | Wait for Elasticsearch http port to become available +- name: OpenSearch | Wait for Elasticsearch http port to become available wait_for: port: "{{ es_http_port }}" host: "{{ hostvars[target_inventory_hostname].es_host }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml similarity index 58% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml index 9050c7799a..cd6253396c 
100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml @@ -1,7 +1,7 @@ --- -- name: ODFE | Get size of upgrade state file +- name: OpenSearch | Get size of upgrade state file stat: - path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" + path: "{{ opensearch.upgrade_state_file_path }}" get_attributes: false get_checksum: false get_mime: false @@ -12,7 +12,7 @@ block: - include_tasks: get-cluster-health.yml - - name: ODFE | Save cluster health to upgrade state file + - name: OpenSearch | Save cluster health to upgrade state file copy: content: "{{ cluster_health.json }}" - dest: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}" + dest: "{{ opensearch.upgrade_state_file_path }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml similarity index 83% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml index 8d8495e525..cb8e49d961 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml @@ -5,7 +5,7 @@ # - es_api.key_path # - es_api.url -- name: ODFE | Test API access using {{ es_api.cert_type }} certificate +- name: OpenSearch | Test API access using {{ es_api.cert_type }} certificate uri: client_cert: "{{ es_api.cert_path }}" client_key: "{{ es_api.key_path }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml similarity index 93% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml index 496198a4a0..78615ea41c 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml @@ -5,7 +5,7 @@ # - es_api.key_path # - expected_status (type: list, e.g. 
[ 'green', 'yellow' ]) -- name: ODFE | Wait for '{{ expected_status | join("' or '") }}' cluster health status +- name: OpenSearch | Wait for '{{ expected_status | join("' or '") }}' cluster health status uri: url: "{{ es_api.url }}/_cluster/health" client_cert: "{{ es_api.cert_path }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml similarity index 88% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml index fcb039654c..82bf3ef35c 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml @@ -6,7 +6,7 @@ # - target_inventory_hostname # - hostvars[target_inventory_hostname].es_node_name -- name: ODFE | Wait for Elasticsearch node to join the cluster +- name: OpenSearch | Wait for Elasticsearch node to join the cluster uri: url: "{{ es_api.url }}/_cat/nodes?h=name" method: GET diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml similarity index 95% rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml index 0175d1b2d5..2517d57286 100644 --- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml +++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml @@ -4,7 +4,7 @@ # - es_api.cert_path # - es_api.key_path -- name: ODFE | Wait for the cluster to finish shard allocation +- name: OpenSearch | Wait for the cluster to finish shard allocation uri: url: "{{ es_api.url }}/_cluster/health" method: GET diff --git a/ansible/playbooks/upgrade.yml b/ansible/playbooks/upgrade.yml index 6639327668..8b14dd745d 100644 --- a/ansible/playbooks/upgrade.yml +++ b/ansible/playbooks/upgrade.yml @@ -138,75 +138,30 @@ # === logging === -# Some pre-upgrade tasks can be run in parallel (what saves time) while others must be run in serial (to support rolling upgrades). -# Such a separation in Ansible can be applied only at play level thus we have two plays below. - -# play 1/2: pre-upgrade parallel tasks -- hosts: logging - become: true - become_method: sudo - tasks: - - include_role: - name: upgrade - tasks_from: opendistro_for_elasticsearch-01 - when: "'logging' in upgrade_components or upgrade_components|length == 0" - vars: - current_group_name: logging - -# play 2/2: serial tasks - hosts: logging become: true become_method: sudo - gather_facts: false # gathered by previous play - serial: 1 tasks: - include_role: name: upgrade - tasks_from: opendistro_for_elasticsearch-02 + tasks_from: opensearch when: "'logging' in upgrade_components or upgrade_components|length == 0" vars: current_group_name: logging -# === opendistro_for_elasticsearch === - -# Some pre-upgrade tasks can be run in parallel (what saves time) while others must be run in serial (to support rolling upgrades). -# Such a separation in Ansible can be applied only at play level thus we have two plays below. 
+# === opensearch === -# play 1/2: parallel tasks -- hosts: opendistro_for_elasticsearch +- hosts: opensearch become: true become_method: sudo tasks: - include_role: name: upgrade - tasks_from: opendistro_for_elasticsearch-01 - when: "'opendistro_for_elasticsearch' in upgrade_components or upgrade_components|length == 0" + tasks_from: opensearch + when: "'opensearch' in upgrade_components or upgrade_components|length == 0" vars: - current_group_name: opendistro_for_elasticsearch + current_group_name: opensearch -# play 2/2: serial tasks -- hosts: opendistro_for_elasticsearch - become: true - become_method: sudo - gather_facts: false # gathered by previous play - serial: 1 - tasks: - - include_role: - name: upgrade - tasks_from: opendistro_for_elasticsearch-02 - when: "'opendistro_for_elasticsearch' in upgrade_components or upgrade_components|length == 0" - vars: - current_group_name: opendistro_for_elasticsearch - -- hosts: kibana - become: true - become_method: sudo - serial: 1 - tasks: - - import_role: - name: upgrade - tasks_from: kibana - when: "'kibana' in upgrade_components or upgrade_components|length == 0" - hosts: grafana become: true diff --git a/cli/epicli.py b/cli/epicli.py index f6af5c9fa1..0e0129b101 100644 --- a/cli/epicli.py +++ b/cli/epicli.py @@ -260,12 +260,12 @@ def upgrade_parser(subparsers): 'jmx_exporter', 'kafka', 'kafka_exporter', - 'kibana', + 'opensearch_dashboards', 'kubernetes', 'load_balancer', 'logging', 'node_exporter', - 'opendistro_for_elasticsearch', + 'opensearch', 'postgresql', 'postgres_exporter', 'prometheus', diff --git a/docs/architecture/logical-view.md b/docs/architecture/logical-view.md index 47d9acde34..ab3a65c922 100644 --- a/docs/architecture/logical-view.md +++ b/docs/architecture/logical-view.md @@ -51,14 +51,14 @@ Source | Purpose /var/log/zookeeper/version-2/* | Zookeeper's logs Containers | Kubernetes components that run in a container -`Filebeat`, unlike `Grafana`, pushes data to database (`Elasticsearch`) instead of pulling them. +`Filebeat`, unlike `Grafana`, pushes data to database (`OpenSearch`) instead of pulling them. [Read more](https://www.elastic.co/products/beats/filebeat) about `Filebeat`. -### Elasticsearch +### OpenSearch -`Elasticsearch` is highly scalable and full-text search enabled analytics engine. Epiphany Platform uses it for storage and analysis of logs. +`OpenSearch` is highly scalable and full-text search enabled analytics engine. Epiphany Platform uses it for storage and analysis of logs. -[Read more](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/index.html) +[Read more](https://opensearch.org/docs/latest) ### Elasticsearch Curator @@ -66,11 +66,11 @@ Containers | Kubernetes components that run in a container [Read more](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.8/index.html) -### Kibana +### OpenSearch Dashboards -`Kibana` like `Grafana` is used in Epiphany for visualization, in addition it has full text search capabilities. `Kibana` uses `Elasticsearch` as datasource for logs, it allows to create full text queries, dashboards and analytics that are performed on logs. +`OpenSearch Dashboards` like `Grafana` is used in Epiphany for visualization. It uses `OpenSearch` as datasource for logs, it allows to create full text queries, dashboards and analytics that are performed on logs. 
-[Read more](https://www.elastic.co/products/kibana) +[Read more](https://opensearch.org/docs/latest/dashboards/index/) ## Computing diff --git a/docs/architecture/process-view.md index 366bb2ee83..a124c7fd16 100644 --- a/docs/architecture/process-view.md +++ b/docs/architecture/process-view.md @@ -24,8 +24,8 @@ metrics from different kinds of exporters. ## Logging -Epiphany uses `Elasticsearch` as key-value database with `Filebeat` for gathering logs and `Kibana` as user interface to write queries and analyze logs. +Epiphany uses `OpenSearch` as a key-value database with `Filebeat` for gathering logs and `OpenSearch Dashboards` as the user interface to write queries and analyze logs. ![Logging process view](diagrams/process-view/logging-process-view.svg) -`Filebeat` gathers OS and application logs and ships them to `Elasticsearch`. Queries from `Kibana` are run against `Elasticsearch` key-value database. \ No newline at end of file +`Filebeat` gathers OS and application logs and ships them to `OpenSearch`. Queries from `OpenSearch Dashboards` are run against the `OpenSearch` key-value database. \ No newline at end of file diff --git a/docs/changelogs/CHANGELOG-0.5.md index 9f1a8f9e36..9acb3929a2 100644 --- a/docs/changelogs/CHANGELOG-0.5.md +++ b/docs/changelogs/CHANGELOG-0.5.md @@ -82,7 +82,7 @@ - [#381](https://github.com/epiphany-platform/epiphany/issues/381) - Add AWS EC2 Root Volume encryption - [#782](https://github.com/epiphany-platform/epiphany/issues/781) - All disks encryption documentation - AWS - [#782](https://github.com/epiphany-platform/epiphany/issues/782) - All disks encryption documentation - Azure -- [#784](https://github.com/epiphany-platform/epiphany/issues/784) - Switch to Open Distro for Elasticsearch +- [#784](https://github.com/epiphany-platform/epiphany/issues/784) - Switch to Open Distro for ElasticSearch - [Data storage](/docs/home/howto/DATABASES.md#how-to-start-working-with-opendistro-for-elasticsearch) - [Centralized logging](/docs/home/howto/LOGGING.md#centralized-logging-setup) diff --git a/docs/changelogs/CHANGELOG-2.0.md index 024d79c693..ffe2fc8e5d 100644 --- a/docs/changelogs/CHANGELOG-2.0.md +++ b/docs/changelogs/CHANGELOG-2.0.md @@ -3,7 +3,7 @@ ## [2.0.0] YYYY-MM-DD ### Added - +- [#2870](https://github.com/epiphany-platform/epiphany/issues/2870) - OpenDistro for ElasticSearch project migrated to OpenSearch - [#959](https://github.com/epiphany-platform/epiphany/issues/959) - Add usage of use_network_security_groups to disable NSG on AWS - [#2701](https://github.com/epiphany-platform/epiphany/issues/2701) - Epicli prepare - generate files in separate directory - [#2812](https://github.com/epiphany-platform/epiphany/issues/2812) - Extend K8s config validation @@ -50,6 +50,7 @@ - [#2833](https://github.com/epiphany-platform/epiphany/issues/2833) - Removal of Logstash component - [#2836](https://github.com/epiphany-platform/epiphany/issues/2836) - Removal of Istio component - [#2837](https://github.com/epiphany-platform/epiphany/issues/2837) - Removal of Apache Ignite component +- [#2870](https://github.com/epiphany-platform/epiphany/issues/2870) - Migration of OpenDistro for ElasticSearch - [#2927](https://github.com/epiphany-platform/epiphany/issues/2927) - Review Epiphany tools (remove outdated) ### Deprecated diff --git a/docs/design-docs/health-monitor/health-monitor.md deleted file mode 100644 index 
f66170f3b3..0000000000 --- a/docs/design-docs/health-monitor/health-monitor.md +++ /dev/null @@ -1,33 +0,0 @@ -# Epiphany Health Monitor service design proposal - -Affected version: 0.6.x/0.7.x - -## Goals - -Provide service that will be monitoring components (Kubernetes, Docker, Kafka, EFK, Prometheus, etc.) deployed using Epiphany. - -## Use cases - -Service will be installed and used on Virtual Machines/Bare Metal on Ubuntu and RedHat (systemd service). -Health Monitor will check status of components that were installed on the cluster. Combinations of those components can be different and will be provided to the service through configuration file. - -Components that Health Monitor should check: -- Kubernetes (kubelet)* -- Query Kubernetes health endpoint (/healthz)* -- Docker* -- Query Docker stats* -- PostgreSQL -- HAProxy -- Prometheus -- Kafka -- ZooKeeper -- ElasticSearch -- RabbitMQ - -`*` means MVP version. - -Health Monitor exposes endpoint that is compliant with [Prometheus metrics format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-format-example) and serves data about health checks. This endpoint should listen on the configurable port (default 98XX). - -## Design proposal - -TODO \ No newline at end of file diff --git a/docs/home/ARM.md b/docs/home/ARM.md index 94466f141b..9d9cd34fb8 100644 --- a/docs/home/ARM.md +++ b/docs/home/ARM.md @@ -29,7 +29,7 @@ Besides making sure that the selected providers, operating systems, components a | monitoring | :heavy_check_mark: | :x: | :x: | | load_balancer | :heavy_check_mark: | :x: | :x: | | postgresql | :heavy_check_mark: | :x: | :x: | -| opendistro_for_elasticsearch | :heavy_check_mark: | :x: | :x: | +| opensearch | :heavy_check_mark: | :x: | :x: | | single_machine | :heavy_check_mark: | :x: | :x: | ***Notes*** @@ -92,9 +92,9 @@ specification: rabbitmq: count: 2 machine: rabbitmq-machine-arm - opendistro_for_elasticsearch: + opensearch: count: 1 - machine: opendistro-machine-arm + machine: opensearch-machine-arm repository: count: 1 machine: repository-machine-arm @@ -164,7 +164,7 @@ specification: ip: x.x.x.x --- kind: infrastructure/virtual-machine -name: opendistro-machine-arm +name: opensearch-machine-arm provider: any based_on: logging-machine specification: @@ -317,7 +317,7 @@ specification: - address_pool: 10.1.8.0/24 opendistro_for_elasticsearch: count: 1 - machine: opendistro-machine-arm + machine: opensearch-machine-arm subnets: - address_pool: 10.1.10.0/24 repository: @@ -390,7 +390,7 @@ specification: size: a1.medium --- kind: infrastructure/virtual-machine -name: opendistro-machine-arm +name: opensearch-machine-arm provider: aws based_on: logging-machine specification: diff --git a/docs/home/COMPONENTS.md b/docs/home/COMPONENTS.md index 23f77bb4c0..640e6fd87c 100644 --- a/docs/home/COMPONENTS.md +++ b/docs/home/COMPONENTS.md @@ -19,10 +19,6 @@ Note that versions are default versions and can be changed in certain cases thro | RabbitMQ | 3.8.9 | https://github.com/rabbitmq/rabbitmq-server | [Mozilla Public License](https://www.mozilla.org/en-US/MPL/) | | Docker CE | 20.10.8 | https://docs.docker.com/engine/release-notes/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | KeyCloak | 14.0.0 | https://github.com/keycloak/keycloak | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | -| Elasticsearch OSS | 7.10.2 | https://github.com/elastic/elasticsearch | https://github.com/elastic/elasticsearch/blob/master/LICENSE.txt | -| 
Elasticsearch Curator OSS | 5.8.3 | https://github.com/elastic/curator | https://github.com/elastic/curator/blob/master/LICENSE.txt | -| Opendistro for Elasticsearch | 1.13.x | https://opendistro.github.io/for-elasticsearch/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | -| Opendistro for Elasticsearch Kibana | 1.13.1 | https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Filebeat | 7.9.2 | https://github.com/elastic/beats | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Filebeat Helm Chart | 7.9.2 | https://github.com/elastic/helm-charts | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | | Prometheus | 2.31.1 | https://github.com/prometheus/prometheus | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) | @@ -180,6 +176,8 @@ Note that versions are default versions and can be changed in certain cases thro | msrest | 0.6.21 | https://github.com/Azure/msrest-for-python | [MIT License](https://api.github.com/repos/azure/msrest-for-python/license) | | msrestazure | 0.6.4 | https://github.com/Azure/msrestazure-for-python | [MIT License](https://api.github.com/repos/azure/msrestazure-for-python/license) | | oauthlib | 3.1.1 | https://github.com/oauthlib/oauthlib | [BSD 3-Clause "New" or "Revised" License](https://api.github.com/repos/oauthlib/oauthlib/license) | +| OpenSearch | 1.2.4 | https://github.com/opensearch-project/OpenSearch | [Apache License 2.0](https://www.apache.org/licenses/) | +| OpenSearch Dashboards | 1.2.0 | https://github.com/opensearch-project/OpenSearch-Dashboards | [Apache License 2.0](https://www.apache.org/licenses/) | | packaging | 20.9 | https://github.com/pypa/packaging | [Other](https://api.github.com/repos/pypa/packaging/license) | | paramiko | 2.9.2 | https://paramiko.org | LGPL | | pathlib2 | 2.3.6 | https://github.com/mcmtroffaes/pathlib2 | [MIT License](https://api.github.com/repos/mcmtroffaes/pathlib2/license) | diff --git a/docs/home/HOWTO.md b/docs/home/HOWTO.md index 74395ab1bc..30fd1d73c4 100644 --- a/docs/home/HOWTO.md +++ b/docs/home/HOWTO.md @@ -34,8 +34,8 @@ - [How to configure scalable Prometheus setup](./howto/MONITORING.md#how-to-configure-scalable-prometheus-setup) - [Import and create Grafana dashboards](./howto/MONITORING.md#import-and-create-grafana-dashboards) - [How to setup default admin password and user in Grafana](./howto/MONITORING.md#how-to-setup-default-admin-password-and-user-in-grafana) - - [How to configure Kibana - Open Distro](./howto/MONITORING.md#how-to-configure-kibana---open-distro) - - [How to configure default user passwords for Kibana - Open Distro, Open Distro for Elasticsearch and Filebeat](./howto/MONITORING.md#how-to-configure-default-user-passwords-for-kibana---open-distro-open-distro-for-elasticsearch-and-filebeat) + - [How to configure OpenSearch Dashboards](./howto/MONITORING.md#how-to-configure-opensearch-dashboards) + - [How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat](./howto/MONITORING.md#how-to-configure-default-passwords-for-service-users-in-opensearch-dashboards-opensearch-and-filebeat) - [How to configure scalable Prometheus setup](./howto/MONITORING.md#how-to-configure-scalable-prometheus-setup) - [How to configure Azure additional monitoring and alerting](./howto/MONITORING.md#how-to-configure-azure-additional-monitoring-and-alerting) - [How to configure AWS additional monitoring and 
alerting](./howto/MONITORING.md#how-to-configure-aws-additional-monitoring-and-alerting) @@ -59,6 +59,7 @@ - [Run apply after upgrade](./howto/UPGRADE.md#run-apply-after-upgrade) - [Kubernetes applications](./howto/UPGRADE.md#kubernetes-applications) - [Kafka upgrade](./howto/UPGRADE.md#how-to-upgrade-kafka) + - [Migration from Open Distro for Elasticsearch to OpenSearch](./howto/UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards) - [Open Distro for Elasticsearch upgrade](./howto/UPGRADE.md#open-distro-for-elasticsearch-upgrade) - [Node exporter upgrade](./howto/UPGRADE.md#node-exporter-upgrade) - [RabbitMQ upgrade](./howto/UPGRADE.md#rabbitmq-upgrade) diff --git a/docs/home/RESOURCES.md b/docs/home/RESOURCES.md index 03dac4c716..75adb34694 100644 --- a/docs/home/RESOURCES.md +++ b/docs/home/RESOURCES.md @@ -42,8 +42,8 @@ Here are some materials concerning Epiphany tooling and cluster components - bot 2. [RabbitMQ](https://www.rabbitmq.com/) - [RabbitMQ Getting started](https://www.rabbitmq.com/getstarted.html) 5. Central logging - 1. [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) - 2. [Kibana](https://www.elastic.co/guide/en/kibana/current/index.html) + 1. [OpenSearch Dashboards](https://opensearch.org/docs/latest/dashboards/index/) + 2. [OpenSearch](https://opensearch.org/docs/latest) 3. [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html) - Beats platform reference(https://www.elastic.co/guide/en/beats/libbeat/current/index.html) 6. Load Balancing diff --git a/docs/home/SECURITY.md b/docs/home/SECURITY.md index e66633969e..df2fe12407 100644 --- a/docs/home/SECURITY.md +++ b/docs/home/SECURITY.md @@ -11,8 +11,12 @@ changes made in settings of your antivirus/antimalware solution. ## Contents -- [Users and roles created by Epiphany](#users-and-roles-created-by-epiphany) -- [Ports used by components in Epiphany](#ports-used-by-components-in-epiphany) +- [Security related information](#security-related-information) + - [Contents](#contents) + - [Users and roles created by epiphany](#users-and-roles-created-by-epiphany) + - [Ports used by components in Epiphany](#ports-used-by-components-in-epiphany) + - [Connection protocols and ciphers used by components in Epiphany](#connection-protocols-and-ciphers-used-by-components-in-epiphany) + - [Notes](#notes) ### Users and roles created by epiphany @@ -61,15 +65,15 @@ different values. The list does not include ports that are bound to the loopback - 9093 - encrypted communication (if TLS/SSL is enabled) - unconfigurable random port from ephemeral range - JMX (for local access only), see note [[1]](#notes) -5. Elasticsearch: +5. OpenSearch: - - 9200 - Elasticsearch REST communication - - 9300 - Elasticsearch nodes communication + - 9200 - OpenSearch REST communication + - 9300 - OpenSearch nodes communication - 9600 - Performance Analyzer (REST API) -6. Kibana: +6. OpenSearch Dashboards: - - 5601 - Kibana web UI + - 5601 - OpenSearch Dashboards web UI 7. 
Prometheus: diff --git a/docs/home/howto/BACKUP.md b/docs/home/howto/BACKUP.md index 45ee9378dc..14f84c7d06 100644 --- a/docs/home/howto/BACKUP.md +++ b/docs/home/howto/BACKUP.md @@ -125,11 +125,11 @@ Recovery includes all backed up files Logging backup includes: -- Elasticsearch database snapshot -- Elasticsearch configuration ``/etc/elasticsearch/`` -- Kibana configuration ``/etc/kibana/`` +- OpenSearch database snapshot +- OpenSearch configuration ``/usr/share/opensearch/config/`` +- OpenSearch Dashboards configuration ``/usr/share/opensearch-dashboards/config/`` -Only single-node Elasticsearch backup is supported. Solution for multi-node Elasticsearch cluster will be added in +Only single-node OpenSearch backup is supported. Solution for multi-node OpenSearch cluster will be added in future release. ### Monitoring diff --git a/docs/home/howto/CLUSTER.md b/docs/home/howto/CLUSTER.md index ac4663a1b4..c11fc30072 100644 --- a/docs/home/howto/CLUSTER.md +++ b/docs/home/howto/CLUSTER.md @@ -645,7 +645,7 @@ specification: count: 0 rabbitmq: count: 0 - opendistro_for_elasticsearch: + opensearch: count: 0 single_machine: count: 1 @@ -827,7 +827,7 @@ Kubernetes master | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check Kubernetes node | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check_mark: | [#1580](https://github.com/epiphany-platform/epiphany/issues/1580) Kafka | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | --- Load Balancer | :heavy_check_mark: | :heavy_check_mark: | :x: | :x: | --- -Opendistro for elasticsearch | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | --- +OpenSearch | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | --- Postgresql | :x: | :x: | :heavy_check_mark: | :heavy_check_mark: | [#1577](https://github.com/epiphany-platform/epiphany/issues/1577) RabbitMQ | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | [#1578](https://github.com/epiphany-platform/epiphany/issues/1578), [#1309](https://github.com/epiphany-platform/epiphany/issues/1309) RabbitMQ K8s | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | [#1486](https://github.com/epiphany-platform/epiphany/issues/1486) diff --git a/docs/home/howto/DATABASES.md b/docs/home/howto/DATABASES.md index cdb5e8a67f..cc6177a3a0 100644 --- a/docs/home/howto/DATABASES.md +++ b/docs/home/howto/DATABASES.md @@ -455,11 +455,10 @@ Properly configured application (kubernetes service) to use fully HA configurati PostgreSQL native replication is now deprecated and removed. Use [PostgreSQL HA replication with repmgr](#how-to-set-up-postgresql-ha-replication-with-repmgr-cluster) instead. -## How to start working with OpenDistro for Elasticsearch +## How to start working with OpenSearch -OpenDistro for Elasticsearch -is [an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL](https://opendistro.github.io/for-elasticsearch/). -In order to start working with OpenDistro change machines count to value greater than 0 in your cluster configuration: +OpenSearch is the [successor](https://opendistro.github.io/for-elasticsearch-docs/) of OpenDistro for ElasticSearch project. Epiphany is providing an [automated solution](./UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards) for migrating your existing ODFE installation to OpenSearch. 
+On the other hand, if you plan to just start working with OpenSearch, change the machines count to a value greater than 0 in your cluster configuration: ```yaml kind: epiphany-cluster @@ -475,22 +474,22 @@ specification: ... logging: count: 1 - opendistro_for_elasticsearch: + opensearch: count: 2 ``` -**Installation with more than one node will always be clustered** - Option to configure the non-clustered installation of more than one node for Open Distro is not supported. +**Installation with more than one node will always be clustered** - the option to configure a non-clustered installation of more than one node for OpenSearch is not supported. ```yaml -kind: configuration/opendistro-for-elasticsearch -title: OpenDistro for Elasticsearch Config +kind: configuration/opensearch +title: OpenSearch Config name: default specification: - cluster_name: EpiphanyElastic + cluster_name: EpiphanyOpenSearch ``` -By default, Kibana is deployed only for `logging` component. If you want to deploy Kibana -for `opendistro_for_elasticsearch` you have to modify feature mapping. Use below configuration in your manifest. +By default, OpenSearch Dashboards (previously the Kibana component) is deployed only for the `logging` component. If you want to deploy it +for the `opensearch` component you have to modify the feature mapping. Use the configuration below in your manifest: ```yaml kind: configuration/feature-mapping @@ -498,12 +497,11 @@ title: "Feature mapping to roles" name: default specification: roles_mapping: - opendistro_for_elasticsearch: - - opendistro-for-elasticsearch + opensearch: - node-exporter - filebeat - firewall - - kibana + - opensearch-dashboards ``` -Filebeat running on `opendistro_for_elasticsearch` hosts will always point to centralized logging hosts (./LOGGING.md). +Filebeat running on `opensearch` hosts will always point to centralized logging hosts ([more info](./LOGGING.md)). diff --git a/docs/home/howto/LOGGING.md index e419b2a543..93921b769e 100644 --- a/docs/home/howto/LOGGING.md +++ b/docs/home/howto/LOGGING.md @@ -1,119 +1,124 @@ # Centralized logging setup -For centralized logging Epiphany uses [OpenDistro for Elasticsearch](https://opendistro.github.io/for-elasticsearch/). -In order to enable centralized logging, be sure that `count` property for `logging` feature is greater than 0 in your +For centralized logging Epiphany uses the [OpenSearch](https://opensearch.org/) stack - an open-source successor[1] of the Elasticsearch & Kibana projects. + +In order to enable centralized logging, be sure to set the `count` property of the `logging` feature to a value greater than 0 in your configuration manifest. ```yaml kind: epiphany-cluster -... +[...] specification: - ... + [...] components: kubernetes_master: count: 1 kubernetes_node: count: 0 - ... + [...] logging: - count: 1 - ... + count: 1 # <<------ + [...] ``` ## Default feature mapping for logging +The example below shows the default feature mapping for logging: ```yaml -... -logging: - - logging - - kibana - - node-exporter - - filebeat - - firewall +[...] +roles_mapping: +[...] + logging: + - logging + - opensearch-dashboards + - node-exporter + - filebeat + - firewall ... ``` -The `logging` role replaced `elasticsearch` role. This change was done to enable Elasticsearch usage also for data +The `logging` role has replaced the `elasticsearch` role. This change was done to enable Elasticsearch usage also for data storage - not only for logs as it was till 0.5.0. 
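If this mapping needs to be adjusted (for example to drop `opensearch-dashboards` from the logging hosts), the fragment above can be expressed as a complete `configuration/feature-mapping` override in the cluster manifest. The sketch below is only an illustration that reuses the default role names listed above; verify them against the defaults shipped with your Epiphany version:

```yaml
# Illustrative override only - role names follow the defaults shown above.
kind: configuration/feature-mapping
title: "Feature mapping to roles"
name: default
specification:
  roles_mapping:
    logging:
      - logging
      - opensearch-dashboards
      - node-exporter
      - filebeat
      - firewall
```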
-Default configuration of `logging` and `opendistro_for_elasticsearch` roles is identical ( -./DATABASES.md#how-to-start-working-with-opendistro-for-elasticsearch). To modify configuration of centralized logging -adjust and use the following defaults in your manifest: +The default configuration of the `logging` and `opensearch` roles is identical (more info [here](./DATABASES.md#how-to-start-working-with-opensearch)). To modify the configuration of centralized logging +adjust the following default values in your manifest to your needs: ```yaml +[...] kind: configuration/logging title: Logging Config name: default specification: - cluster_name: EpiphanyElastic + cluster_name: EpiphanyOpensearch clustered: True paths: - data: /var/lib/elasticsearch - repo: /var/lib/elasticsearch-snapshots - logs: /var/log/elasticsearch + data: /var/lib/opensearch + repo: /var/lib/opensearch-snapshots + logs: /var/log/opensearch ``` -## How to manage Opendistro for Elasticsearch data - -Elasticsearch stores data using JSON documents, and an Index is a collection of documents. As in every database, it's +## How to manage OpenSearch data +OpenSearch stores data using JSON documents, and an Index is a collection of documents. As in every database, it's crucial to correctly maintain data in this one. It's almost impossible to deliver database configuration which will fit -to every type of project and data stored in. Epiphany deploys preconfigured Opendistro Elasticsearch, but this -configuration may not meet user requirements. Before going to production, configuration should be tailored to the +to every type of project and data stored in. Epiphany deploys a preconfigured OpenSearch instance, but this +configuration may not meet every user's requirements. That's why, before going to production, the stack configuration should be tailored to the project needs. All configuration tips and tricks are available -in [official documentation](https://opendistro.github.io/for-elasticsearch-docs/). +in the [official documentation](https://opensearch.org/docs/latest). -The main and most important decisions to take before you deploy cluster are: +The main and most important decisions to take before you deploy the cluster are: -1) How many Nodes are needed -2) How big machines and/or storage data disks need to be used + - how many nodes are needed + - how big the machines and/or storage data disks need to be -These parameters are defined in yaml file, and it's important to create a big enough cluster. +These parameters can be defined in the manifest yaml file. It is important to create a big enough cluster. ```yaml specification: + [..] components: logging: - count: 1 # Choose number of nodes + count: 1 # Choose a number of nodes that suits your needs + machines: + - logging-machine-n + [..] --- kind: infrastructure/virtual-machine title: "Virtual Machine Infra" -name: logging-machine +name: logging-machine-n specification: - size: Standard_DS2_v2 # Choose machine size + size: Standard_DS2_v2 # Choose a VM size that suits your needs ``` -If it's required to have Elasticsearch which works in cluster formation configuration, except setting up more than one +If it's required to have an OpenSearch instance which works in a cluster formation, besides setting up more than one machine in yaml config file please acquaint dedicated -support [article](https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/cluster/) and adjust -Elasticsearch configuration file. 
+support [article](https://opensearch.org/docs/latest/troubleshoot/index/) and adjust +the OpenSearch configuration file. -At this moment Opendistro for Elasticsearch does not support plugin similar -to [ILM](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-lifecycle-management.html), log rotation -is possible only by configuration created in Index State Management. +We also strongly encourage you to get familiar with the plugins and policies available with OpenSearch, including the following: -`ISM - Index State Management` - is a plugin that provides users and administrative panel to monitor the indices and +`ISM - Index State Management` - is a plugin that provides users with an administrative panel to monitor the indices and apply policies at different index stages. ISM lets users automate periodic, administrative operations by triggering them based on index age, size, or number of documents. Using the ISM plugin, can define policies that automatically handle -index rollovers or deletions. ISM is installed with Opendistro by default - user does not have to enable this. Official +index rollovers or deletions. Official plugin documentation is available -in [Opendistro for Elasticsearch website](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/). +[here](https://opensearch.org/docs/latest/im-plugin/ism/index/). To reduce the consumption of disk resources, every index you created should use -well-designed [policy](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/). +a well-designed [policy](https://opensearch.org/docs/latest/im-plugin/ism/policies/). Among others these two index actions might save machine from filling up disk space: -[`Index Rollover`](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/#rollover) - rolls an alias +[`Index Rollover`](https://opensearch.org/docs/latest/im-plugin/ism/policies/#rollover) - rolls an alias to a new index. Set up correctly max index size / age or minimum number of documents to keep index size in requirements framework. -[`Index Deletion`](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/#delete) - deletes indexes +[`Index Deletion`](https://opensearch.org/docs/latest/im-plugin/ism/policies/#delete) - deletes indexes managed by policy -Combining these actions, adapting them to data amount and specification users are able to create policy which will -maintain data in cluster for example: to secure node from fulfilling disk space. +Combining these actions and adapting them to the data volume and specification, users are able to create a policy which will +maintain their data in the cluster, for example to keep a node from filling up its disk space. -There is example of policy below. Be aware that this is only example, and it needs to be adjusted to environment needs. +There is an example of such a policy below. Be aware that it is only an example and, like every example, it needs to be adjusted to the actual environment needs. ```json { @@ -181,64 +186,66 @@ } ``` -Example above shows configuration with rollover daily or when index achieve 1 GB size. Indexes older than 14 days will +The example above shows a configuration with a rollover index policy triggered on a daily basis or when the index achieves 1 GB in size. Indexes older than 14 days will be deleted. States and conditionals could be combined. 
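Purely as an illustration of the rollover-plus-delete pattern described above, a minimal policy could also be created through the ISM API, for example from an Ansible `uri` task as sketched below. The endpoint path, thresholds, index pattern, credentials and policy name are assumptions made for this sketch, not Epiphany defaults:

```yaml
# Sketch only: creates a minimal ISM policy (rollover daily or at 1 GB, delete after 14 days).
- name: Create example ISM rollover/delete policy
  uri:
    url: "https://localhost:9200/_plugins/_ism/policies/epi_example_policy"
    method: PUT
    user: admin
    password: "{{ admin_password }}"  # placeholder credential
    force_basic_auth: true
    validate_certs: false
    status_code: [200, 201]
    body_format: json
    body:
      policy:
        description: "Example rollover/delete policy"
        default_state: hot
        states:
          - name: hot
            actions:
              - rollover:
                  min_index_age: 1d
                  min_size: 1gb
            transitions:
              - state_name: delete
                conditions:
                  min_index_age: 14d
          - name: delete
            actions:
              - delete: {}
            transitions: []
        ism_template:
          index_patterns:
            - "filebeat-*"
```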
Please -see [policies](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/) documentation for more +see the [policies](https://opensearch.org/docs/latest/im-plugin/ism/policies/) documentation for more details. -`Apply Policy` 
+ +#### Apply Policy -To apply policy use similar API request as presented below: +To apply a policy you can use an API request similar to the one presented below: ``` -PUT _template/template_01 +PUT _index_template/ism_rollover ``` ```json { "index_patterns": ["filebeat*"], "settings": { - "opendistro.index_state_management.rollover_alias": "filebeat" - "opendistro.index_state_management.policy_id": "epi_policy" + "plugins.index_state_management.rollover_alias": "filebeat", + "plugins.index_state_management.policy_id": "epi_policy" } } ``` After applying this policy, every new index created under this one will apply to it. There is also possibility to apply -policy to already existing policies by assigning them to policy in Index Management Kibana panel. +the policy to already existing indices by assigning it to them in the dashboard's Index Management panel. -## How to export Kibana reports to CSV format +## How to export Dashboards reports -Since v1.0 Epiphany provides the possibility to export reports from Kibana to CSV, PNG or PDF using the Open Distro for -Elasticsearch Kibana reports feature. +Since v1.0 Epiphany provides the possibility to export reports from Kibana to CSV, PNG or PDF using the Open Distro for Elasticsearch Kibana reports feature. After migrating from the Elastic stack to the OpenSearch stack, you can use the OpenSearch Reporting feature to achieve this and more. -Check more details about the plugin and how to export reports in the -[documentation](https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/reporting) +Check more details about the OpenSearch Reports plugin and how to export reports in the +[documentation](https://github.com/opensearch-project/dashboards-reports/blob/main/README.md#opensearch-dashboards-reports). -`Note: Currently in Open Distro for Elasticsearch Kibana the following plugins are installed and enabled by default: security, alerting, anomaly detection, index management, query workbench, notebooks, reports, alerting, gantt chart plugins.` +Notice: Currently in the OpenSearch stack the following plugins are installed and enabled by default: security, alerting, anomaly detection, index management, query workbench, notebooks, reports, gantt chart plugins. -You can easily check enabled default plugins for Kibana using the following command on the logging machine: -`./bin/kibana-plugin list` in Kibana directory. +You can easily check enabled default plugins for the Dashboards component using the following command on the logging machine: +`./bin/opensearch-dashboards-plugin list` in the directory where you've installed _opensearch-dashboards_. --- ## How to add multiline support for Filebeat logs -In order to properly handle multilines in files harvested by Filebeat you have to provide `multiline` definition in the -configuration manifest. Using the following code you will be able to specify which lines are part of a single event. +In order to properly handle multiline outputs in files harvested by Filebeat you have to provide a `multiline` definition in the cluster configuration manifest. Using the following code you will be able to specify which lines are part of a single event. By default, postgresql block is provided, you can use it as example: ```yaml +[..] postgresql_input: multiline: pattern: >- '^\d{4}-\d{2}-\d{2} ' negate: true match: after +[..] ``` -Supported inputs: `common_input`,`postgresql_input`,`container_input` +Supported inputs: `common_input`, `postgresql_input`, `container_input`. 
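As a further, purely hypothetical example that is not part of the defaults, a `container_input` entry joining Java stack-trace continuation lines into the originating event could follow the same structure:

```yaml
[..]
container_input:
  multiline:
    # Assumed pattern: indented 'at ...' / '...' lines and 'Caused by:' lines
    # are appended to the preceding log line.
    pattern: >-
      '^[[:space:]]+(at|\.{3})[[:space:]]+\b|^Caused by:'
    negate: false
    match: after
[..]
```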
 More details about multiline options you can find in the [official documentation](https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html)
@@ -253,19 +260,29 @@ specification:
     k8s_as_cloud_service: true
 ```
 
-## How to use default Kibana dashboards
+## How to use default OpenSearch dashboards
+
+---
+This feature is not working in the current version of OpenSearch, so `setup.dashboards.enabled` is set to _false_ as a workaround.
+---
 
 It is possible to configure `setup.dashboards.enabled` and `setup.dashboards.index` Filebeat settings using `specification.kibana.dashboards` key in `configuration/filebeat` doc.
-When `specification.kibana.dashboards.enabled` is set to `auto`, the corresponding setting in Filebeat configuration file will be set to `true` only if Kibana is configured to be present on the host.
+When `specification.kibana.dashboards.enabled` is set to `auto`, the corresponding setting in the Filebeat configuration file will be set to `true` only if the OpenSearch Dashboards component is configured to be present on the host.
 Other possible values are `true` and `false`.
 
 Default configuration:
 
-```
+```yaml
 specification:
+[..]
   kibana:
     dashboards:
       enabled: auto
       index: filebeat-*
 ```
 
-Note: Setting `specification.kibana.dashboards.enabled` to `true` not providing Kibana will result in a Filebeat crash.
+Notice: Setting `specification.kibana.dashboards.enabled` to `true` without providing OpenSearch Dashboards will result in a Filebeat crash.
+
+
+---
+[1] More information about migrating from Elasticsearch & Kibana to OpenSearch & OpenSearch Dashboards can be found [here](./UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards).
\ No newline at end of file
diff --git a/docs/home/howto/MAINTENANCE.md b/docs/home/howto/MAINTENANCE.md
index 52cc3de205..f42ead43c6 100644
--- a/docs/home/howto/MAINTENANCE.md
+++ b/docs/home/howto/MAINTENANCE.md
@@ -121,12 +121,12 @@ To check status of Node Exporter, use the command:
 status prometheus-node-exporter
 ```
 
-#### - Elasticsearch
+#### - OpenSearch
 
-To check status of Elasticsearch, use the command:
+To check the status of OpenSearch, we can use the command:
 
 ```shell
-systemct status elasticsearch
+systemctl status opensearch
 ```
 
 Check if service is listening on 9200 (API communication port):
@@ -141,7 +141,7 @@ Check if service is listening on 9300 (nodes communication port):
 netstat -antup | grep 9300
 ```
 
-Check status of Elasticsearch cluster:
+We can also check the status of the OpenSearch cluster:
 
 ```shell
 :9200/_cluster/health
diff --git a/docs/home/howto/MONITORING.md b/docs/home/howto/MONITORING.md
index 3f2917c2d0..7456c26133 100644
--- a/docs/home/howto/MONITORING.md
+++ b/docs/home/howto/MONITORING.md
@@ -11,10 +11,10 @@ Grafana:
 - [How to setup default admin password and user in Grafana](#how-to-setup-default-admin-password-and-user-in-grafana)
 - [Import and create Grafana dashboards](#import-and-create-grafana-dashboards)
 
-Kibana:
+OpenSearch Dashboards:
 
-- [How to configure Kibana](#how-to-configure-kibana)
-- [How to configure default user password in Kibana](#how-to-configure-default-user-password-in-kibana)
+- [How to configure OpenSearch Dashboards](#how-to-configure-opensearch-dashboards)
+- [How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat](#how-to-configure-default-passwords-for-service-users-in-opensearch-dashboards-opensearch-and-filebeat)
 
 RabbitMQ:
 
@@ -231,50 +231,50 @@ When dashboard creation or import succeeds you will see it on your dashboard lis
 
 *Note: For some dashboards, there is no data to visualize until there is traffic activity for the monitored component.*
 
-# Kibana
+# OpenSearch Dashboards
 
-Kibana is an free and open frontend application that sits on top of the Elastic Stack, providing search and data visualization capabilities for data indexed in Elasticsearch. For more informations about Kibana please refer to [the official website](https://www.elastic.co/what-is/kibana).
+OpenSearch Dashboards (a Kibana counterpart) is an open source search and analytics visualization layer. It also serves as a user interface for many OpenSearch project plugins. For more information, please refer to [the official website](https://opensearch.org/docs/latest/dashboards/index/).
 
-## How to configure Kibana - Open Distro
+## How to configure OpenSearch Dashboards
 
-In order to start viewing and analyzing logs with Kibana, you first need to add an index pattern for Filebeat according to the following steps:
+In order to start viewing and analyzing logs with the Dashboards tool, you first need to add an index pattern for Filebeat according to the following procedure:
 
-1. Goto the `Management` tab
-2. Select `Index Patterns`
-3. On the first step define as index pattern:
+1. Go to the `Stack Management` tab
+2. Select `Index Patterns` --> `Create index pattern`
+3. Define an index pattern:
    `filebeat-*`
-   Click next.
+   and click next.
 4. Configure the time filter field if desired by selecting `@timestamp`. This field represents the time that events occurred or were processed. You can choose not to have a time field, but you will not be able to narrow down your data by a time range.
 
-This filter pattern can now be used to query the Elasticsearch indices.
+This filter pattern can now be used to query the OpenSearch indices.
 
-By default Kibana adjusts the UTC time in `@timestamp` to the browser's local timezone. This can be changed in `Management` > `Advanced Settings` > `Timezone for date formatting`.
+By default OpenSearch Dashboards adjusts the UTC time in `@timestamp` to the browser's local timezone. This can be changed in `Stack Management` > `Advanced Settings` > `Timezone for date formatting`.
 
-## How to configure default user passwords for Kibana - Open Distro, Open Distro for Elasticsearch and Filebeat
+## How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat
 
-To configure admin password for Kibana - Open Distro and Open Distro for Elasticsearch you need to follow the procedure below.
-There are separate procedures for `logging` and `opendistro-for-elasticsearch` roles since most of the times for `opendistro-for-elasticsearch`, `kibanaserver` and `logstash` users are not required to be present.
+To configure the admin password for OpenSearch Dashboards (previously Kibana) and OpenSearch you need to follow the procedure below.
+There are separate procedures for the `logging` and `opensearch` roles since, for `opensearch`, the `kibanaserver` and `logstash` users are usually not required to be present.
 
 ### Logging component
 
-#### - Logging role
+#### Logging role
 
-By default Epiphany removes users that are listed in `demo_users_to_remove` section of `configuration/logging` doc.
-By default, `kibanaserver` user (needed by default Epiphany installation of Kibana) and `logstash` (needed by default Epiphany
+By default Epiphany removes users that are listed in the `demo_users_to_remove` section of the `configuration/logging` manifest document.
+Additionally, the `kibanaserver`[1] user (needed by the default Epiphany installation of Dashboards) and the `logstash` user (needed by the default Epiphany
 installation of Filebeat) are not removed. If you want to perform configuration by Epiphany, set `kibanaserver_user_active` to `true`
-for `kibanaserver` user or `logstash_user_active` for `logstash` user. For `logging` role, those settings are already set to `true` by default.
+for the `kibanaserver` user and/or `logstash_user_active` to `true` for the `logstash` user. For the `logging` role, those settings are already set to `true` by default.
 We strongly advice to set different password for each user.
 
-To change `admin` user's password, change value for `admin_password` key. For `kibanaserver` and `logstash`, change values
-for `kibanaserver_password` and `logstash_password` keys respectively. Changes from logging role will be propagated to Kibana
-and Filebeat configuration.
+To change the `admin` user's password, you need to change the value of the `admin_password` key (see the example below). For `kibanaserver` and `logstash`, you need to change the values
+for `kibanaserver_password` and `logstash_password` keys respectively. Changes from the logging role will be propagated to the OpenSearch Dashboards
+and Filebeat configuration accordingly.
 
 ```yaml
 kind: configuration/logging
 title: Logging Config
 name: default
 specification:
-  ...
+  [...]
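+  # Illustrative comment only: the keys below hold the credentials that Epiphany applies
+  # to the security plugin users and then propagates to the OpenSearch Dashboards and
+  # Filebeat configuration.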
   admin_password: YOUR_PASSWORD
   kibanaserver_password: YOUR_PASSWORD
   kibanaserver_user_active: true
@@ -286,32 +286,32 @@ specification:
     - snapshotrestore
 ```
 
-#### - Kibana role
+#### OpenSearch Dashboards (Kibana) role
 
-To set password of `kibanaserver` user, which is used by Kibana for communication with Open Distro Elasticsearch backend follow the procedure
-described in [Logging role](#-logging-role).
+To set the password for the `kibanaserver` user, which is used by Dashboards for communication with the OpenSearch backend, follow the procedure
+described in [Logging role](#logging-role).
 
-#### - Filebeat role
+#### Filebeat role
 
-To set password of `logstash` user, which is used by Filebeat for communication with Open Distro Elasticsearch backend follow the procedure described
+To set the password of the `logstash` user, which is used by Filebeat for communication with the OpenSearch backend, follow the procedure described
 in [Logging role](#-logging-role).
 
-### Open Distro for Elasticsearch component
+### OpenSearch component
 
 By default Epiphany removes all demo users except `admin` user. Those users are listed in `demo_users_to_remove` section
-of `configuration/opendistro-for-elasticsearch` doc. If you want to keep `kibanaserver` user (needed by default Epiphany installation of Kibana),
-you need to remove it from `demo_users_to_remove` list and set `kibanaserver_user_active` to `true` in order to change the default password.
+of the `configuration/opensearch` manifest doc (see the example below). If you want to keep the `kibanaserver` user (needed by the default Epiphany installation of OpenSearch Dashboards),
+you need to exclude it from the `demo_users_to_remove` list and set `kibanaserver_user_active` to `true` in order to change the default password.
 We strongly advice to set different password for each user.
 
-To change `admin` user's password, change value for `admin_password` key. For `kibanaserver` and `logstash`, change values for `kibanaserver_password`
+To change the `admin` user's password, change the value of the `admin_password` key. For `kibanaserver` and `logstash`, change the values for `kibanaserver_password`
 and `logstash_password` keys respectively.
 
 ```yaml
-kind: configuration/opendistro-for-elasticsearch
-title: Open Distro for Elasticsearch Config
+kind: configuration/opensearch
+title: OpenSearch Config
 name: default
 specification:
-  ...
+  [...]
   admin_password: YOUR_PASSWORD
   kibanaserver_password: YOUR_PASSWORD
   kibanaserver_user_active: false
@@ -325,9 +325,15 @@ specification:
     - kibanaserver
 ```
 
-### Upgrade of Elasticsearch, Kibana and Filebeat
+### Upgrade of OpenSearch, OpenSearch Dashboards and Filebeat
+
+Keep in mind that during the upgrade process Epiphany takes the `kibanaserver` (for Dashboards) and `logstash` (for Filebeat) user passwords and re-applies them to the upgraded configuration of Filebeat and OpenSearch Dashboards. So if these passwords differ from what was set up before the upgrade, remember to use the new values on the next login.
+
+Note that the Epiphany upgrade of the OpenSearch, OpenSearch Dashboards or Filebeat components will fail if the `kibanaserver` or `logstash` usernames were changed in the configuration of OpenSearch, OpenSearch Dashboards or Filebeat beforehand.
+
+
-During upgrade Epiphany takes `kibanaserver` (for Kibana) and `logstash` (for Filebeat) user passwords and re-applies them to upgraded configuration of Filebeat and Kibana. Epiphany upgrade of Open Distro, Kibana or Filebeat will fail if `kibanaserver` or `logstash` usernames were changed in configuration of Kibana, Filebeat or Open Distro for Elasticsearch.
+[1] For backward compatibility, some naming conventions (i.e. the kibanaserver user name) are still present within the new (OpenSearch) platform, though they will be phased out in the future. As a consequence, the Epiphany stack still uses these names as well.
 
 # HAProxy
diff --git a/docs/home/howto/RETENTION.md b/docs/home/howto/RETENTION.md
index 3fa6ccdb9c..841681c000 100644
--- a/docs/home/howto/RETENTION.md
+++ b/docs/home/howto/RETENTION.md
@@ -1,7 +1,7 @@
 An Epiphany cluster has a number of components which log, collect and retain data. To make sure that these
 do not exceed the usable storage of the machines they running on, the following configurations are available.
 
-## Elasticsearch
+## OpenSearch
 
 TODO
 
diff --git a/docs/home/howto/SECURITY_GROUPS.md b/docs/home/howto/SECURITY_GROUPS.md
index d9f84a09f3..2e0d0f6694 100644
--- a/docs/home/howto/SECURITY_GROUPS.md
+++ b/docs/home/howto/SECURITY_GROUPS.md
@@ -278,7 +278,7 @@ specification:
       count: 0
     rabbitmq:
       count: 0
-    opendistro_for_elasticsearch:
+    opensearch:
       count: 0
     single_machine:
       count: 0
diff --git a/docs/home/howto/UPGRADE.md b/docs/home/howto/UPGRADE.md
index 0086bbb0b1..09a93f3c77 100644
--- a/docs/home/howto/UPGRADE.md
+++ b/docs/home/howto/UPGRADE.md
@@ -1,5 +1,43 @@
 # Upgrade
-
+- [Upgrade](#upgrade)
+  - [Introduction](#introduction)
+  - [Online upgrade](#online-upgrade)
+    - [Online prerequisites](#online-prerequisites)
+    - [Start the online upgrade](#start-the-online-upgrade)
+  - [Offline upgrade](#offline-upgrade)
+    - [Offline prerequisites](#offline-prerequisites)
+    - [Start the offline upgrade](#start-the-offline-upgrade)
+  - [Additional parameters](#additional-parameters)
+  - [Run *apply* after *upgrade*](#run-apply-after-upgrade)
+    - [Kubernetes applications](#kubernetes-applications)
+  - [How to upgrade Kafka](#how-to-upgrade-kafka)
+    - [Kafka upgrade](#kafka-upgrade)
+    - [ZooKeeper upgrade](#zookeeper-upgrade)
+  - [Migration from Open Distro for Elasticsearch & Kibana to OpenSearch and OpenSearch Dashboards](#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards)
+  - [Open Distro for Elasticsearch upgrade](#open-distro-for-elasticsearch-upgrade)
+  - [Node exporter upgrade](#node-exporter-upgrade)
+  - [RabbitMQ upgrade](#rabbitmq-upgrade)
+  - [Kubernetes upgrade](#kubernetes-upgrade)
+    - [Prerequisites](#prerequisites)
+  - [PostgreSQL upgrade](#postgresql-upgrade)
+    - [Versions](#versions)
+    - [Prerequisites](#prerequisites-1)
+    - [Upgrade](#upgrade-1)
+      - [Manual actions](#manual-actions)
+    - [Post-upgrade processing](#post-upgrade-processing)
+      - [Statistics](#statistics)
+    - [Delete old cluster](#delete-old-cluster)
+  - [Terraform upgrade from Epiphany 1.x to 2.x](#terraform-upgrade-from-epiphany-1x-to-2x)
+    - [Azure](#azure)
+      - [v0.12.6 => v0.13.x](#v0126--v013x)
+      - [v0.13.x => v0.14.x](#v013x--v014x)
+      - [v0.14.x => v1.0.x](#v014x--v10x)
+      - [v1.0.x => v1.1.3](#v10x--v113)
+    - [AWS](#aws)
+      - [v0.12.6 => v0.13.x](#v0126--v013x-1)
+      - [v0.13.x => v0.14.x](#v013x--v014x-1)
+      - [v0.14.x => v1.0.x](#v014x--v10x-1)
+      - [v1.0.x => v1.1.3](#v10x--v113-1)
 
 ## Introduction
 
 From Epicli 0.4.2 and up the CLI has the ability
 to perform upgrades on certain components on a cluster. The components
@@ -200,7 +238,7 @@ specification:
       count: 1
     rabbitmq:
       count: 0
-    opendistro_for_elasticsearch:
+    opensearch:
       count: 0
   name: clustername
   prefix: 'prefix'
@@ -259,6 +297,51 @@ then start with the rest **one by one**.
 
 More detailed information about ZooKeeper you can find in [ZooKeeper documentation](https://cwiki.apache.org/confluence/display/ZOOKEEPER).
 
+## Migration from Open Distro for Elasticsearch & Kibana to OpenSearch and OpenSearch Dashboards
+
+---
+**NOTE**
+
+Make sure you have a backup before proceeding to the migration steps described below!
+
+---
+Following the decision of Elastic NV[1] to cease the open source options available for Elasticsearch and Kibana and to release them under the Elastic license (more info [here](https://github.com/epiphany-platform/epiphany/issues/2870)), the Epiphany team decided to implement a mechanism of automatic migration from Elasticsearch 7.10.2 to OpenSearch 1.2.4.
+
+It is important to remember that while the new platform makes an effort to continue to support a broad set of third party tools (i.e. Beats tools), you may still come across drawbacks or even malfunctions along the way, as not everything has been tested or explicitly added to the OpenSearch compatibility scope[2].
+Additionally, some of the components (i.e. Elasticsearch Curator) or some embedded service accounts (i.e. _kibanaserver_) can still be found in the OpenSearch environment, but they will be phased out over time.
+
+The migration can be triggered by setting the `odfe_migration` switch in your manifest file:
+```yaml
+[..]
+---
+kind: configuration/logging
+title: Logging Config
+[..]
+specification:
+  [..]
+  odfe_migration: true # <<-------
+  [..]
+```
+and running the `upgrade` command against the logging component of your Epiphany installation, together with the `-f` option:
+```
+epicli upgrade -b / --upgrade-components "logging,filebeat" -f /.yml
+```
+Keep in mind that for the current version of OPS/OPSD it is necessary to include the `filebeat` component along with the `logging` one in order to implement the workaround for the _Kibana API not available_ [bug](https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656#issuecomment-978036236).
+The default value of the `odfe_migration` parameter is set to _false_.
+
+All remarks described below related to the TLS certificates of the Open Distro upgrade stay valid. You should plan and test all your upgrade activities before proceeding on production.
+
+Upgrading ESS/ODFE versions not shipped with previous Epiphany releases is not supported. If your environment is customized, it needs to be standardized (as described in [this](https://opensearch.org/docs/latest/upgrade-to/upgrade-to/#upgrade-paths) table) prior to running the migration.
+
+Migration of Elasticsearch Curator is not supported. More info on the use of Curator in an OpenSearch environment can be found [here](https://github.com/opensearch-project/OpenSearch/issues/1352).
+
+[1] https://www.elastic.co/pricing/faq/licensing#what-are-the-key-changes-being-made-to-the-elastic-license
+
+[2] https://opensearch.org/docs/latest/clients/agents-and-ingestion-tools/index/
+
+
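+As a quick sanity check after the migration finishes, you can query the cluster root endpoint and verify that it reports the OpenSearch distribution and the expected version. The command below is illustrative only; it assumes the default API port 9200, the self-signed certificates generated by Epiphany and the `admin` user:
+```shell
+# -k skips certificate verification (self-signed certs), -u passes the admin credentials
+curl -k -u admin:YOUR_PASSWORD https://localhost:9200
+# the "version" object in the response should contain "distribution": "opensearch" and "number": "1.2.4"
+```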
+ ## Open Distro for Elasticsearch upgrade --- @@ -268,9 +351,9 @@ Before upgrade procedure make sure you have a data backup! --- -Since Epiphany v1.0.0 we provide upgrade elasticsearch-oss package to v7.10.2 and opendistro-\* plugins package to +Since Epiphany v1.0.0 we provide upgrade elasticsearch-oss package to v7.10.2 and opensearch-\* plugins package to v1.13.\*. Upgrade will be performed automatically when the upgrade procedure detects your `logging` -, `opendistro_for_elasticsearch` or `kibana` hosts. +, `opensearch` or `kibana` hosts. Upgrade of Elasticsearch uses API calls (GET, PUT, POST) which requires an admin TLS certificate. By default, Epiphany generates self-signed certificates for this purpose but if you use your own, you have to provide the admin certificate's @@ -283,7 +366,7 @@ logging: cert_path: /etc/elasticsearch/custom-admin.pem key_path: /etc/elasticsearch/custom-admin-key.pem -opendistro_for_elasticsearch: +opensearch: upgrade_config: custom_admin_certificate: cert_path: /etc/elasticsearch/custom-admin.pem diff --git a/schema/any/defaults/epiphany-cluster.yml b/schema/any/defaults/epiphany-cluster.yml index 27a3014ac2..43218bcee7 100644 --- a/schema/any/defaults/epiphany-cluster.yml +++ b/schema/any/defaults/epiphany-cluster.yml @@ -41,7 +41,7 @@ specification: count: 0 machines: [] configuration: default - opendistro_for_elasticsearch: + opensearch: count: 0 machines: [] configuration: default diff --git a/schema/aws/defaults/epiphany-cluster.yml b/schema/aws/defaults/epiphany-cluster.yml index f50a21cb6d..19c3236c82 100644 --- a/schema/aws/defaults/epiphany-cluster.yml +++ b/schema/aws/defaults/epiphany-cluster.yml @@ -74,9 +74,9 @@ specification: machine: rabbitmq-machine configuration: default subnets: - - address_pool: 10.1.8.0/24 - availability_zone: eu-west-2a - opendistro_for_elasticsearch: + - availability_zone: eu-west-2a + address_pool: 10.1.8.0/24 + opensearch: count: 0 machine: logging-machine configuration: default diff --git a/schema/azure/defaults/epiphany-cluster.yml b/schema/azure/defaults/epiphany-cluster.yml index 331821bcea..28b3e5e00f 100644 --- a/schema/azure/defaults/epiphany-cluster.yml +++ b/schema/azure/defaults/epiphany-cluster.yml @@ -68,7 +68,7 @@ specification: configuration: default subnets: - address_pool: 10.1.8.0/24 - opendistro_for_elasticsearch: + opensearch: count: 0 machine: logging-machine configuration: default diff --git a/schema/common/defaults/configuration/feature-mapping.yml b/schema/common/defaults/configuration/feature-mapping.yml index 8050b2b9e3..43de112f41 100644 --- a/schema/common/defaults/configuration/feature-mapping.yml +++ b/schema/common/defaults/configuration/feature-mapping.yml @@ -17,11 +17,11 @@ specification: enabled: true - name: logging enabled: true - - name: opendistro-for-elasticsearch + - name: opensearch enabled: true - name: elasticsearch-curator enabled: true - - name: kibana + - name: opensearch-dashboards enabled: true - name: filebeat enabled: true @@ -66,7 +66,7 @@ specification: - firewall logging: - logging - - kibana + - opensearch-dashboards - node-exporter - filebeat - firewall @@ -121,8 +121,8 @@ specification: - node-exporter - filebeat - firewall - opendistro_for_elasticsearch: - - opendistro-for-elasticsearch + opensearch: + - opensearch - node-exporter - filebeat - firewall diff --git a/schema/common/defaults/configuration/firewall.yml b/schema/common/defaults/configuration/firewall.yml index 8a9d66493c..3a8c2f2b33 100644 --- a/schema/common/defaults/configuration/firewall.yml 
+++ b/schema/common/defaults/configuration/firewall.yml @@ -71,7 +71,7 @@ specification: enabled: true ports: - 9100/tcp - opendistro_for_elasticsearch: + opensearch: enabled: true ports: - 9200/tcp diff --git a/schema/common/defaults/configuration/kibana.yml b/schema/common/defaults/configuration/kibana.yml deleted file mode 100644 index bea9fbb13b..0000000000 --- a/schema/common/defaults/configuration/kibana.yml +++ /dev/null @@ -1,5 +0,0 @@ -kind: configuration/kibana -title: "Kibana" -name: default -specification: - kibana_log_dir: /var/log/kibana diff --git a/schema/common/defaults/configuration/logging.yml b/schema/common/defaults/configuration/logging.yml index be687c2e65..cb971cce68 100644 --- a/schema/common/defaults/configuration/logging.yml +++ b/schema/common/defaults/configuration/logging.yml @@ -2,7 +2,11 @@ kind: configuration/logging title: Logging Config name: default specification: - cluster_name: EpiphanyElastic + cluster_name: EpiphanyOpenSearch + opensearch_os_user: opensearch + opensearch_os_group: opensearch + dashboards_os_user: opensearchboard + dashboards_os_user_password: PASSWORD_TO_CHANGE admin_password: PASSWORD_TO_CHANGE kibanaserver_password: PASSWORD_TO_CHANGE kibanaserver_user_active: true @@ -13,12 +17,20 @@ specification: - readall - snapshotrestore paths: - data: /var/lib/elasticsearch - repo: /var/lib/elasticsearch-snapshots - logs: /var/log/elasticsearch + opensearch_home: /usr/share/opensearch + opensearch_conf_dir: /usr/share/opensearch/config + opensearch_log_dir: /var/log/opensearch + opensearch_plugin_bin_path: /usr/share/opensearch/bin/opensearch-plugin + opensearch_repo: /var/lib/opensearch-snapshots + opensearch_data: /var/lib/opensearch + opensearch_logs: /var/log/opensearch + opensearch_perftop_home: /usr/share/opensearch/perftop + opsd_plugin_bin_path: /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin + opsd_home: /usr/share/opensearch-dashboards + opsd_conf_dir: /usr/share/opensearch-dashboards/config jvm_options: - Xmx: 1g # see https://www.elastic.co/guide/en/elasticsearch/reference/7.9/heap-size.html - opendistro_security: + Xmx: 1g + opensearch_security: ssl: transport: enforce_hostname_verification: true diff --git a/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml b/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml deleted file mode 100644 index 9f3979d722..0000000000 --- a/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml +++ /dev/null @@ -1,27 +0,0 @@ -kind: configuration/opendistro-for-elasticsearch -title: Open Distro for Elasticsearch Config -name: default -specification: - cluster_name: EpiphanyElastic - clustered: true - admin_password: PASSWORD_TO_CHANGE - kibanaserver_password: PASSWORD_TO_CHANGE - kibanaserver_user_active: false - logstash_password: PASSWORD_TO_CHANGE - logstash_user_active: false - demo_users_to_remove: - - kibanaro - - readall - - snapshotrestore - - logstash - - kibanaserver - paths: - data: /var/lib/elasticsearch - repo: /var/lib/elasticsearch-snapshots - logs: /var/log/elasticsearch - jvm_options: - Xmx: 1g # see https://www.elastic.co/guide/en/elasticsearch/reference/7.9/heap-size.html - opendistro_security: - ssl: - transport: - enforce_hostname_verification: true diff --git a/schema/common/defaults/configuration/opensearch-dashboards.yml b/schema/common/defaults/configuration/opensearch-dashboards.yml new file mode 100644 index 0000000000..836b091c1f --- /dev/null +++ 
b/schema/common/defaults/configuration/opensearch-dashboards.yml @@ -0,0 +1,13 @@ +kind: configuration/opensearch-dashboards +title: "OpenSearch-Dashboards" +name: default +specification: + dashboards_os_user: opensearchdboard + dashboards_os_user_password: PASSWORD_TO_CHANGE + dashboards_user: kibanaserver + dashboards_user_password: PASSWORD_TO_CHANGE + paths: + opensearchdash_home: /usr/share/opensearch-dashboards + opensearchdash_conf_dir: /usr/share/opensearch-dashboards/config + opensearchdash_plugin_bin_path: /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin + opensearchdash_log_dir: /var/log/opensearchdashboards diff --git a/schema/common/defaults/configuration/opensearch.yml b/schema/common/defaults/configuration/opensearch.yml new file mode 100644 index 0000000000..287d29dca9 --- /dev/null +++ b/schema/common/defaults/configuration/opensearch.yml @@ -0,0 +1,37 @@ +kind: configuration/opensearch +title: OpenSearch Config +name: default +specification: + cluster_name: EpiphanyOpensearch + odfe_migration: false + clustered: true + opensearch_os_user: opensearch + opensearch_os_group: opensearch + admin_password: PASSWORD_TO_CHANGE + kibanaserver_password: PASSWORD_TO_CHANGE + kibanaserver_user_active: false + logstash_password: PASSWORD_TO_CHANGE + logstash_user_active: false + demo_users_to_remove: + - kibanaro + - readall + - snapshotrestore + - logstash + - kibanaserver + paths: + opensearch_home: /usr/share/opensearch + opensearch_conf_dir: /usr/share/opensearch/config + opensearch_log_dir: /var/log/opensearch + opensearch_repo: /var/lib/opensearch-snapshots + opensearch_data: /var/lib/opensearch + opensearch_logs: /var/log/opensearch + opensearch_perftop_home: /usr/share/opensearch/perftop + opensearchdash_plugin_bin_path: /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin + opensearchdash_home: /usr/share/opensearch-dashboards + opensearchdash_conf_dir: /usr/share/opensearch-dashboards/config + jvm_options: + Xmx: 1g + opensearch_security: + ssl: + transport: + enforce_hostname_verification: true diff --git a/schema/common/validation/configuration/feature-mapping.yml b/schema/common/validation/configuration/feature-mapping.yml index 85b954b095..f6791f4b43 100644 --- a/schema/common/validation/configuration/feature-mapping.yml +++ b/schema/common/validation/configuration/feature-mapping.yml @@ -55,7 +55,7 @@ properties: type: array items: type: string - opendistro_for_elasticsearch: + opensearch: type: array items: type: string diff --git a/schema/common/validation/configuration/firewall.yml b/schema/common/validation/configuration/firewall.yml index 82148a9453..1de1ba9bcf 100644 --- a/schema/common/validation/configuration/firewall.yml +++ b/schema/common/validation/configuration/firewall.yml @@ -134,7 +134,7 @@ properties: type: array items: type: string - opendistro_for_elasticsearch: + opensearch: type: object properties: enabled: diff --git a/schema/common/validation/configuration/kibana.yml b/schema/common/validation/configuration/kibana.yml deleted file mode 100644 index 17b77c2e15..0000000000 --- a/schema/common/validation/configuration/kibana.yml +++ /dev/null @@ -1,7 +0,0 @@ -"$id": "#/specification" -title: "Kibana specification schema" -description: "Kibana specification schema" -type: object -properties: - kibana_log_dir: - type: string diff --git a/schema/common/validation/configuration/logging.yml b/schema/common/validation/configuration/logging.yml index 2a434160a0..12dcf7bea8 100644 --- 
a/schema/common/validation/configuration/logging.yml +++ b/schema/common/validation/configuration/logging.yml @@ -32,7 +32,7 @@ properties: properties: Xmx: type: string - opendistro_security: + opensearch_security: type: object properties: ssl: diff --git a/schema/common/validation/configuration/opensearch-dashboards.yml b/schema/common/validation/configuration/opensearch-dashboards.yml new file mode 100644 index 0000000000..527cae109d --- /dev/null +++ b/schema/common/validation/configuration/opensearch-dashboards.yml @@ -0,0 +1,5 @@ +kind: configuration/opensearch-dashboards +title: "OpenSearch Dashboards specification schema" +name: default +specification: + opensearch_dashboards_log_dir: /var/log/opensearchdashboards diff --git a/schema/common/validation/configuration/opendistro-for-elasticsearch.yml b/schema/common/validation/configuration/opensearch.yml similarity index 85% rename from schema/common/validation/configuration/opendistro-for-elasticsearch.yml rename to schema/common/validation/configuration/opensearch.yml index 3992bc36ab..4ccecb9bcf 100644 --- a/schema/common/validation/configuration/opendistro-for-elasticsearch.yml +++ b/schema/common/validation/configuration/opensearch.yml @@ -1,10 +1,12 @@ "$id": "#/specification" -title: "Opendistro-for-elasticsearch specification schema" -description: "Opendistro-for-elasticsearch specification schema" +title: "opensearch schema" +description: "OpenSearch specification schema" type: object properties: cluster_name: type: string + odfe_migration: + type: boolean clustered: type: boolean admin_password: @@ -35,7 +37,7 @@ properties: properties: Xmx: type: string - opendistro_security: + opensearch_security: type: object properties: ssl: