diff --git a/README.md b/README.md
index b996c4b75d..7fe94fdcc5 100644
--- a/README.md
+++ b/README.md
@@ -8,9 +8,9 @@ Epiphany at its core is a full automation of Kubernetes and Docker plus addition
- Kafka or RabbitMQ for high speed messaging/events
- Prometheus and Alertmanager for monitoring with Grafana for visualization
-- Elasticsearch and Kibana for centralized logging (OpenDistro)
+- OpenSearch for centralized logging
- HAProxy for loadbalancing
-- Postgres and Elasticsearch for data storage
+- Postgres and OpenSearch for data storage
- KeyCloak for authentication
- Helm as package manager for Kubernetes
diff --git a/ansible/playbooks/backup_logging.yml b/ansible/playbooks/backup_logging.yml
index c3a31e1df3..cfd77b545b 100644
--- a/ansible/playbooks/backup_logging.yml
+++ b/ansible/playbooks/backup_logging.yml
@@ -16,27 +16,19 @@
- name: Run elasticsearch snapshot tasks
import_role:
name: backup
- tasks_from: logging_elasticsearch_snapshot
+ tasks_from: logging_opensearch_snapshot
- name: Run elasticsearch archive tasks
import_role:
name: backup
- tasks_from: logging_elasticsearch_etc
-
-- hosts: kibana[0]
- gather_facts: true
- become: true
- become_method: sudo
- serial: 1
- tasks:
- - when: specification.components.logging.enabled | default(false)
- block:
- - name: Include kibana vars
+ tasks_from: logging_opensearch_conf
+ # OpenSearch Dashboards
+ - name: Include opensearch_dashboards vars
include_vars:
- file: roles/kibana/vars/main.yml
+ file: roles/opensearch_dashboards/vars/main.yml
name: component_vars
- - name: Run kibana backup tasks
+ - name: Run opensearch_dashboards backup tasks
import_role:
name: backup
- tasks_from: logging_kibana_etc
+ tasks_from: logging_opensearch_dashboards_conf
vars:
snapshot_name: "{{ hostvars[groups.logging.0].snapshot_name }}"
diff --git a/ansible/playbooks/filebeat.yml b/ansible/playbooks/filebeat.yml
index d2295b29c3..952fefa1aa 100644
--- a/ansible/playbooks/filebeat.yml
+++ b/ansible/playbooks/filebeat.yml
@@ -1,7 +1,7 @@
---
# Ansible playbook that installs and configures Filebeat
-- hosts: opendistro_for_elasticsearch:logging:kibana # to gather facts
+- hosts: opensearch:logging:opensearch_dashboards # to gather facts
tasks: []
- hosts: filebeat
diff --git a/ansible/playbooks/kibana.yml b/ansible/playbooks/kibana.yml
deleted file mode 100644
index b47fa3425c..0000000000
--- a/ansible/playbooks/kibana.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Ansible playbook that makes sure the base items for all nodes are installed
-
-- hosts: all
- gather_facts: true
- tasks: []
-
-- hosts: kibana
- become: true
- become_method: sudo
- roles:
- - kibana
diff --git a/ansible/playbooks/opendistro_for_elasticsearch.yml b/ansible/playbooks/opendistro_for_elasticsearch.yml
deleted file mode 100644
index 9ec9a72ed6..0000000000
--- a/ansible/playbooks/opendistro_for_elasticsearch.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# Ansible playbook for installing Elasticsearch
-
-- hosts: opendistro_for_elasticsearch
- become: true
- become_method: sudo
- roles:
- - opendistro_for_elasticsearch
- vars:
- current_group_name: "opendistro_for_elasticsearch"
diff --git a/ansible/playbooks/opensearch.yml b/ansible/playbooks/opensearch.yml
new file mode 100644
index 0000000000..b4a6e188df
--- /dev/null
+++ b/ansible/playbooks/opensearch.yml
@@ -0,0 +1,10 @@
+---
+# Ansible playbook for installing OpenSearch
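+# 'current_group_name' is passed so the opensearch role can enumerate the hosts of its own inventory group (used e.g. when building nodes_dn for the node certificates).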
+
+- hosts: opensearch
+ become: true
+ become_method: sudo
+ roles:
+ - opensearch
+ vars:
+ current_group_name: "opensearch"
diff --git a/ansible/playbooks/opensearch_dashboards.yml b/ansible/playbooks/opensearch_dashboards.yml
new file mode 100644
index 0000000000..0d16452b38
--- /dev/null
+++ b/ansible/playbooks/opensearch_dashboards.yml
@@ -0,0 +1,11 @@
+---
+# Ansible playbook for installing OpenSearch Dashboards
+
+- hosts: repository # to gather facts
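+  # empty play, used only to gather facts from the repository hosts (same pattern as in filebeat.yml)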
+ tasks: []
+
+- hosts: opensearch_dashboards
+ become: true
+ become_method: sudo
+ roles:
+ - opensearch_dashboards
diff --git a/ansible/playbooks/recovery_logging.yml b/ansible/playbooks/recovery_logging.yml
index 796d1c0bae..2a15a98ed2 100644
--- a/ansible/playbooks/recovery_logging.yml
+++ b/ansible/playbooks/recovery_logging.yml
@@ -13,22 +13,15 @@
name: component_vars
- import_role:
name: recovery
- tasks_from: logging_elasticsearch_etc
+ tasks_from: logging_opensearch_conf
- import_role:
name: recovery
- tasks_from: logging_elasticsearch_snapshot
+ tasks_from: logging_opensearch_snapshot
-- hosts: kibana[0]
- gather_facts: true
- become: true
- become_method: sudo
- serial: 1
- tasks:
- - when: specification.components.logging.enabled | default(false)
- block:
+ # OpenSearch Dashboards
- include_vars:
- file: roles/kibana/vars/main.yml
+ file: roles/opensearch_dashboards/vars/main.yml
name: component_vars
- import_role:
name: recovery
- tasks_from: logging_kibana_etc
+ tasks_from: logging_opensearch_dashboards_conf
diff --git a/ansible/playbooks/roles/backup/defaults/main.yml b/ansible/playbooks/roles/backup/defaults/main.yml
index 6f454115bb..10b2779a70 100644
--- a/ansible/playbooks/roles/backup/defaults/main.yml
+++ b/ansible/playbooks/roles/backup/defaults/main.yml
@@ -2,6 +2,6 @@
backup_dir: /epibackup
backup_destination_dir: "{{ backup_dir }}/mounted"
backup_destination_host: >-
- {{ groups.repository[0] if (custom_repository_url | default(false)) else (resolved_repository_hostname | default(groups.repository[0])) }}
-elasticsearch_snapshot_repository_name: epiphany
-elasticsearch_snapshot_repository_location: /var/lib/elasticsearch-snapshots
+ "{{ groups.repository[0] if (custom_repository_url | default(false)) else (resolved_repository_hostname | default(groups.repository[0])) }}"
+opensearch_snapshot_repository_name: epiphany
+opensearch_snapshot_repository_location: /var/lib/opensearch-snapshots
diff --git a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml b/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml
deleted file mode 100644
index 6857739ce0..0000000000
--- a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_snapshot.yml
+++ /dev/null
@@ -1,90 +0,0 @@
----
-- name: Include default vars from opendistro_for_elasticsearch role
- include_vars:
- file: roles/opendistro_for_elasticsearch/defaults/main.yml
- name: odfe
-
-- name: Set helper facts
- set_fact:
- elasticsearch_endpoint: >-
- https://{{ ansible_default_ipv4.address }}:9200
- snapshot_name: >-
- {{ ansible_date_time.iso8601_basic_short | replace('T','-') }}
- vars:
- uri_template: &uri
- client_cert: "{{ odfe.certificates.dirs.certs }}/{{ odfe.certificates.files.admin.cert.filename }}"
- client_key: "{{ odfe.certificates.dirs.certs }}/{{ odfe.certificates.files.admin.key.filename }}"
- validate_certs: false
- body_format: json
-
-- name: Display snapshot name
- debug: var=snapshot_name
-
-- name: Check cluster health
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_cluster/health"
- method: GET
- register: uri_response
- until: uri_response is success
- retries: 12
- delay: 5
-
-- name: Ensure snapshot repository is defined
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}"
- method: PUT
- body:
- type: fs
- settings:
- location: "{{ elasticsearch_snapshot_repository_location }}"
- compress: true
-
-- name: Trigger snapshot creation
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}"
- method: PUT
-
-- name: Wait (up to 12h) for snapshot completion
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}"
- method: GET
- register: uri_response
- until: (uri_response.json.snapshots | selectattr('snapshot', 'equalto', snapshot_name) | first).state == "SUCCESS"
- retries: "{{ (12 * 3600 // 10) | int }}" # 12h
- delay: 10
-
-- name: Find all snapshots
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/_all"
- method: GET
- register: uri_response
-
-- name: Delete old snapshots
- uri:
- <<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ item }}"
- method: DELETE
- loop: >-
- {{ uri_response.json.snapshots | map(attribute='snapshot') | reject('equalto', snapshot_name) | list }}
-
-- name: Create snapshot archive
- import_tasks: common/create_snapshot_archive.yml
- vars:
- snapshot_prefix: "elasticsearch_snapshot"
- dirs_to_archive:
- - "{{ elasticsearch_snapshot_repository_location }}/"
-
-- name: Create snapshot checksum
- import_tasks: common/create_snapshot_checksum.yml
-
-- name: Transfer artifacts via rsync
- import_tasks: common/download_via_rsync.yml
- vars:
- artifacts:
- - "{{ snapshot_path }}"
- - "{{ snapshot_path }}.sha1"
diff --git a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml
similarity index 64%
rename from ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml
rename to ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml
index 1fa5c38750..51803a018b 100644
--- a/ansible/playbooks/roles/backup/tasks/logging_elasticsearch_etc.yml
+++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_conf.yml
@@ -1,4 +1,14 @@
---
+- name: Include default vars from opensearch role
+ include_vars:
+ file: roles/opensearch/defaults/main.yml
+ name: opensearch_defaults
+
+- name: Include vars from opensearch role
+ include_vars:
+ file: roles/opensearch/vars/main.yml
+ name: opensearch_vars
+
- name: Assert that the snapshot_name fact is defined and valid
assert:
that:
@@ -13,9 +23,9 @@
- name: Create snapshot archive
import_tasks: common/create_snapshot_archive.yml
vars:
- snapshot_prefix: "elasticsearch_etc"
+ snapshot_prefix: "opensearch_conf"
dirs_to_archive:
- - /etc/elasticsearch/
+ - "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}"
- name: Create snapshot checksum
import_tasks: common/create_snapshot_checksum.yml
diff --git a/ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml
similarity index 69%
rename from ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml
rename to ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml
index acc84d08b3..98a660b802 100644
--- a/ansible/playbooks/roles/backup/tasks/logging_kibana_etc.yml
+++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_dashboards_conf.yml
@@ -10,12 +10,17 @@
- name: Display snapshot name
debug: var=snapshot_name
+- name: Include vars from opensearch_dashboards role
+ include_vars:
+ file: roles/opensearch_dashboards/vars/main.yml
+ name: opensearch_dashboards_vars
+
- name: Create snapshot archive
import_tasks: common/create_snapshot_archive.yml
vars:
- snapshot_prefix: "kibana_etc"
+ snapshot_prefix: "opensearch_dashboards_conf_dir"
dirs_to_archive:
- - /etc/kibana/
+ - "{{ opensearch_dashboards_vars.specification.paths.opensearch_dashboards_conf_dir }}"
- name: Create snapshot checksum
import_tasks: common/create_snapshot_checksum.yml
diff --git a/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml b/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml
new file mode 100644
index 0000000000..d7425bde74
--- /dev/null
+++ b/ansible/playbooks/roles/backup/tasks/logging_opensearch_snapshot.yml
@@ -0,0 +1,96 @@
+---
+- name: Include default vars from opensearch role
+ include_vars:
+ file: roles/opensearch/defaults/main.yml
+ name: opensearch_defaults
+
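+# The admin client certificate and key defined in the opensearch role defaults are used (via the '&uri' anchor below) to authenticate the REST calls in this file.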
+- name: Set helper facts
+ set_fact:
+ opensearch_endpoint: >-
+ https://{{ ansible_default_ipv4.address }}:9200
+ snapshot_name: >-
+ {{ ansible_date_time.iso8601_basic_short | replace('T','-') }}
+ vars:
+ uri_template: &uri
+ client_cert: "{{ opensearch_defaults.certificates.dirs.certs }}/{{ opensearch_defaults.certificates.files.admin.cert.filename }}"
+ client_key: "{{ opensearch_defaults.certificates.dirs.certs }}/{{ opensearch_defaults.certificates.files.admin.key.filename }}"
+ validate_certs: false
+ body_format: json
+
+- name: Check cluster health
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_cluster/health"
+ method: GET
+ return_content: yes
+ register: cluster_status
+ until: cluster_status.json.status
+ retries: 60
+ delay: 1
+
+- name: Show warning when backup is not supported
+  when: cluster_status.json.number_of_nodes != 1
+  debug:
+    msg: "[WARNING] No snapshot backup was created: only single-node cluster backup is supported."
+
+- name: Snapshot backup
+ when: cluster_status.json.number_of_nodes == 1 # https://github.com/epiphany-platform/epiphany/blob/develop/docs/home/howto/BACKUP.md#logging
+ block:
+ - name: Ensure snapshot repository is defined
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}"
+ method: PUT
+ body:
+ type: fs
+ settings:
+ location: "{{ opensearch_snapshot_repository_location }}"
+ compress: true
+
+ - name: Trigger snapshot creation
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}"
+ method: PUT
+
+ - name: Wait (up to 12h) for snapshot completion
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}"
+ method: GET
+ register: uri_response
+ until: (uri_response.json.snapshots | selectattr('snapshot', 'equalto', snapshot_name) | first).state == "SUCCESS"
+ retries: "{{ (12 * 3600 // 10) | int }}" # 12h
+ delay: 10
+
+ - name: Find all snapshots
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/_all"
+ method: GET
+ register: uri_response
+
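+  # Only the snapshot created above is kept; every other snapshot in the repository is deleted.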
+ - name: Delete old snapshots
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ item }}"
+ method: DELETE
+ loop: >-
+ {{ uri_response.json.snapshots | map(attribute='snapshot') | reject('equalto', snapshot_name) | list }}
+
+ - name: Create snapshot archive
+ import_tasks: common/create_snapshot_archive.yml
+ vars:
+ snapshot_prefix: "opensearch_snapshot"
+ dirs_to_archive:
+ - "{{ opensearch_snapshot_repository_location }}/"
+
+ - name: Create snapshot checksum
+ import_tasks: common/create_snapshot_checksum.yml
+
+ - name: Transfer artifacts via rsync
+ import_tasks: common/download_via_rsync.yml
+ vars:
+ artifacts:
+ - "{{ snapshot_path }}"
+ - "{{ snapshot_path }}.sha1"
diff --git a/ansible/playbooks/roles/certificate/tasks/install-packages.yml b/ansible/playbooks/roles/certificate/tasks/install-packages.yml
index 47a77683de..6927a66bca 100644
--- a/ansible/playbooks/roles/certificate/tasks/install-packages.yml
+++ b/ansible/playbooks/roles/certificate/tasks/install-packages.yml
@@ -10,4 +10,4 @@
RedHat:
- python3-cryptography
module_defaults:
- yum: { lock_timeout: "{{ yum_lock_timeout }}" }
+ yum: {lock_timeout: "{{ yum_lock_timeout }}"}
diff --git a/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml b/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml
index ca80432e62..d743ae642f 100644
--- a/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml
+++ b/ansible/playbooks/roles/elasticsearch_curator/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Include installation task
- include_tasks: install-es-curator-{{ ansible_os_family }}.yml
+ include_tasks: install-ops-curator-{{ ansible_os_family }}.yml
- name: Include configuration tasks
include_tasks: configure-cron-jobs.yml
diff --git a/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml b/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
index cb7e2a723e..2312be41c0 100644
--- a/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
+++ b/ansible/playbooks/roles/filebeat/tasks/configure-filebeat.yml
@@ -7,16 +7,16 @@
name: postgresql_defaults
when: "'postgresql' in group_names"
-# Do not select Kibana configured to use ES deployed by 'opendistro_for_elasticsearch' role
+# Do not select an OpenSearch Dashboards host configured to use OpenSearch deployed by the 'opensearch' role
- name: Set value for setup.kibana.host
set_fact:
setup_kibana_host: >-
- {{ hostvars[groups.kibana|intersect(groups.logging)|first]['ansible_hostname'] }}
+ {{ hostvars[groups.opensearch_dashboards|intersect(groups.logging)|first]['ansible_hostname'] }}
when:
- not is_upgrade_run
- - groups.kibana[0] is defined
+ - groups.opensearch_dashboards[0] is defined
- groups.logging is defined
- - groups.kibana | intersect(groups.logging) | length
+ - groups.opensearch_dashboards | intersect(groups.logging) | length
- name: Copy configuration file (filebeat.yml)
template:
diff --git a/ansible/playbooks/roles/filebeat/tasks/main.yml b/ansible/playbooks/roles/filebeat/tasks/main.yml
index 4cdfe32550..4568ff85ee 100644
--- a/ansible/playbooks/roles/filebeat/tasks/main.yml
+++ b/ansible/playbooks/roles/filebeat/tasks/main.yml
@@ -5,7 +5,7 @@
- name: Load variables from logging role # needed to get passwords for both installation types
include_vars:
file: roles/logging/vars/main.yml
- name: opendistro_for_logging_vars
+ name: logging_vars
when: groups.logging is defined
- name: Include installation tasks for Filebeat as DaemonSet for "k8s as cloud service"
diff --git a/ansible/playbooks/roles/filebeat/templates/custom-chart-values.yml.j2 b/ansible/playbooks/roles/filebeat/templates/custom-chart-values.yml.j2
index 831897a347..d5a91e7f3a 100644
--- a/ansible/playbooks/roles/filebeat/templates/custom-chart-values.yml.j2
+++ b/ansible/playbooks/roles/filebeat/templates/custom-chart-values.yml.j2
@@ -96,7 +96,7 @@ filebeatConfig:
{% endfor %}
username: logstash
- password: {{ "'%s'" % opendistro_for_logging_vars.specification.logstash_password | replace("'","''") }}
+ password: {{ "'%s'" % logging_vars.specification.logstash_password | replace("'","''") }}
{# Controls the verification of certificates #}
ssl.verification_mode: none
diff --git a/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2 b/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
index a6715edf20..70dc3f92dc 100644
--- a/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
+++ b/ansible/playbooks/roles/filebeat/templates/filebeat.yml.j2
@@ -144,6 +144,12 @@ filebeat.config.modules:
# ======================= Elasticsearch template setting =======================
+{% if is_upgrade_run %}
+setup.template.overwrite: true
+setup.template.append_fields:
+ - name: log.file.path
+ type: text
+{% endif %}
setup.template.settings:
index.number_of_shards: 3
#index.codec: best_compression
@@ -169,16 +175,21 @@ setup.template.settings:
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
-{% set dashboards_enabled = is_upgrade_run | ternary(existing_setup_dashboards.enabled, specification.kibana.dashboards.enabled) %}
-{% if dashboards_enabled | lower == 'auto' %}
- {% if group_names | intersect(['kibana', 'logging']) | count == 2 %}
-setup.dashboards.enabled: true
- {% else %}
+#
+# The logic below is commented out as a workaround for a Filebeat problem until the OPS team resolves it.
+# More info: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656#issuecomment-978036236
+# A static value is used instead:
setup.dashboards.enabled: false
- {% endif %}
-{% else %}
-setup.dashboards.enabled: {{ dashboards_enabled | lower }}
-{% endif %}
+# {% set dashboards_enabled = is_upgrade_run | ternary(existing_setup_dashboards.enabled, specification.opensearch.dashboards.enabled) %}
+# {% if dashboards_enabled | lower == 'auto' %}
+# {% if group_names | intersect(['opensearch_dashboards', 'logging']) | count == 2 %}
+# setup.dashboards.enabled: true
+# {% else %}
+#setup.dashboards.enabled: false
+# {% endif %}
+#{% else %}
+#setup.dashboards.enabled: {{ dashboards_enabled | lower }}
+#{% endif %}
# The Elasticsearch index name.
# This setting overwrites the index name defined in the dashboards and index pattern.
@@ -186,7 +197,7 @@ setup.dashboards.enabled: {{ dashboards_enabled | lower }}
{% if is_upgrade_run %}
{% set dashboards_index = 'filebeat-*' if (existing_setup_dashboards.index == 'null') else existing_setup_dashboards.index %}
{% else %}
- {% set dashboards_index = specification.kibana.dashboards.index %}
+ {% set dashboards_index = specification.opensearch.dashboards.index %}
{% endif %}
setup.dashboards.index: "{{ dashboards_index }}"
@@ -247,7 +258,7 @@ setup.kibana:
{% if setup_kibana_host is defined %}
host: {{ setup_kibana_host }}
username: kibanaserver
- password: {{ "'%s'" % opendistro_for_logging_vars.specification.kibanaserver_password | replace("'","''") }}
+ password: {{ "'%s'" % logging_vars.specification.kibanaserver_password | replace("'","''") }}
{% else %}
#host: "localhost:5601"
{% endif %}
@@ -256,8 +267,8 @@ setup.kibana:
{% if existing_setup_kibana.host is defined %}
host: {{ existing_setup_kibana.host }}
{% else %}
- {% if groups.kibana is defined and groups.logging is defined and (groups.kibana | intersect(groups.logging) | count > 0) %}
- host: {{ hostvars[groups.kibana | intersect(groups.logging) | first].ansible_hostname }}
+ {% if groups.opensearch_dashboards is defined and groups.logging is defined and (groups.opensearch_dashboards | intersect(groups.logging) | count > 0) %}
+ host: {{ hostvars[groups.opensearch_dashboards | intersect(groups.logging) | first].ansible_hostname }}
{% else %}
#host: "localhost:5601"
{% endif %}
@@ -305,7 +316,7 @@ output.elasticsearch:
# Authentication credentials - either API key or username/password.
username: logstash
{% if not is_upgrade_run %}
- password: {{ "'%s'" % opendistro_for_logging_vars.specification.logstash_password | replace("'","''") }}
+ password: {{ "'%s'" % logging_vars.specification.logstash_password | replace("'","''") }}
{% else %}
password: {{ "'%s'" % existing_output_es_password | replace("'","''") }}
{% endif %}
diff --git a/ansible/playbooks/roles/kibana/defaults/main.yml b/ansible/playbooks/roles/kibana/defaults/main.yml
deleted file mode 100644
index f07c1f3457..0000000000
--- a/ansible/playbooks/roles/kibana/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-kibana_version:
- RedHat: "1.13.1"
- Debian: "1.13.1"
-
-# Required and used for upgrade Open Distro for Elasticsearch - Kibana:
-specification:
- kibana_log_dir: /var/log/kibana
diff --git a/ansible/playbooks/roles/kibana/tasks/main.yml b/ansible/playbooks/roles/kibana/tasks/main.yml
deleted file mode 100644
index 0ed8bf4be3..0000000000
--- a/ansible/playbooks/roles/kibana/tasks/main.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Install Kibana package
- package:
- name: "{{ _packages[ansible_os_family] }}"
- state: present
- vars:
- _packages:
- Debian:
- - opendistroforelasticsearch-kibana={{ kibana_version[ansible_os_family] }}
- RedHat:
- - opendistroforelasticsearch-kibana-{{ kibana_version[ansible_os_family] }}
- module_defaults:
- yum: {lock_timeout: "{{ yum_lock_timeout }}"}
-
-- name: Include logging configuration tasks
- include_tasks: setup-logging.yml
-
-- name: Load variables from logging/opendistro_for_elasticsearch role
- when: context is undefined or context != "upgrade"
- block:
- - name: Load variables from logging role
- include_vars:
- file: roles/logging/vars/main.yml
- name: opendistro_for_logging_vars
- when: "'logging' in group_names"
-
- - name: Load variables from opendistro_for_elasticsearch role
- include_vars:
- file: roles/opendistro_for_elasticsearch/vars/main.yml
- name: opendistro_for_data_vars
- when: "'opendistro_for_elasticsearch' in group_names"
-
-- name: Update Kibana configuration file
- template:
- backup: true
- src: kibana.yml.j2
- dest: /etc/kibana/kibana.yml
- owner: kibana
- group: root
- mode: u=rw,go=
- register: change_config
-
-- name: Restart Kibana service
- systemd:
- name: kibana
- state: restarted
- when: change_config.changed
-
-- name: Start kibana service
- service:
- name: kibana
- state: started
- enabled: true
-
-- name: Wait for kibana to start listening
- wait_for:
- host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"
- port: 5601
- delay: 5
-
-- name: Wait for Kibana to be ready
- uri:
- url: http://{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}:5601/api/status
- method: GET
- register: response
- until: "'kbn_name' in response and response.status == 200"
- retries: 120
- delay: 2
diff --git a/ansible/playbooks/roles/kibana/tasks/setup-logging.yml b/ansible/playbooks/roles/kibana/tasks/setup-logging.yml
deleted file mode 100644
index f6f248b8d1..0000000000
--- a/ansible/playbooks/roles/kibana/tasks/setup-logging.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Create log directory for Kibana
- file:
- path: "{{ specification.kibana_log_dir }}"
- state: directory
- mode: u=rwx,go=rx
-
-- name: Create logfile for Kibana
- copy:
- dest: "{{ specification.kibana_log_dir }}/kibana.log"
- owner: kibana
- group: kibana
- mode: u=rw,go=r
- force: false
- content: ""
-
-- name: Set permissions on logfile for Kibana
- file:
- path: "{{ specification.kibana_log_dir }}/kibana.log"
- owner: kibana
- group: kibana
- mode: u=rw,go=r
-
-- name: Copy logrotate config
- template:
- dest: /etc/logrotate.d/kibana
- owner: root
- group: root
- mode: u=rw,go=r
- src: logrotate.conf.j2
diff --git a/ansible/playbooks/roles/kibana/templates/kibana.yml.j2 b/ansible/playbooks/roles/kibana/templates/kibana.yml.j2
deleted file mode 100644
index e27bf5112d..0000000000
--- a/ansible/playbooks/roles/kibana/templates/kibana.yml.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-# {{ ansible_managed }}
-
-# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License").
-# You may not use this file except in compliance with the License.
-# A copy of the License is located at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# or in the "license" file accompanying this file. This file is distributed
-# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-# express or implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-# Description:
-# Default Kibana configuration for Open Distro.
-
-server.host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"
-elasticsearch.hosts:
-{% if 'logging' in group_names %}
- # Logging hosts:
- {% for host in groups['logging'] %}
- - "https://{{hostvars[host]['ansible_hostname']}}:9200"
- {% endfor %}
-{% elif 'opendistro_for_elasticsearch' in group_names %}
- # Data hosts:
- {% for host in groups['opendistro_for_elasticsearch'] %}
- - "https://{{hostvars[host]['ansible_hostname']}}:9200"
- {% endfor %}
-{% endif %}
-
-elasticsearch.ssl.verificationMode: none
-elasticsearch.username: kibanaserver
-{% set password = 'kibanaserver' %}
-{% if context is undefined or context != 'upgrade' -%}
- {# mode: apply -#}
- {% if 'logging' in group_names -%}
- {% set password = opendistro_for_logging_vars.specification.kibanaserver_password -%}
- {% elif 'opendistro_for_elasticsearch' in group_names -%}
- {% set password = opendistro_for_data_vars.specification.kibanaserver_password -%}
- {% endif %}
-{% else -%}
- {# mode: upgrade -#}
- {% set password = existing_es_password %}
-{% endif %}
-elasticsearch.password: {{ "'%s'" % password | replace("'","''") }}
-elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
-
-# Enables you to specify a file where Kibana stores log output.
-logging.dest: {{ specification.kibana_log_dir }}/kibana.log
-
-opendistro_security.multitenancy.enabled: true
-opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
-opendistro_security.readonly_mode.roles: ["kibana_read_only"]
-
-# Provided with 1.10.1 version:
-# https://opendistro.github.io/for-elasticsearch-docs/docs/upgrade/1-10-1/
-# Use this setting if you are running kibana without https
-opendistro_security.cookie.secure: false
-
-newsfeed.enabled: false
-telemetry.optIn: false
-telemetry.enabled: false
diff --git a/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2 b/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2
deleted file mode 100644
index d550d97e19..0000000000
--- a/ansible/playbooks/roles/kibana/templates/logrotate.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-{{ specification.kibana_log_dir }}/*.log {
- rotate 5
- daily
- compress
- missingok
- notifempty
- delaycompress
-}
diff --git a/ansible/playbooks/roles/logging/tasks/main.yml b/ansible/playbooks/roles/logging/tasks/main.yml
index 5671e42791..4c615900a2 100644
--- a/ansible/playbooks/roles/logging/tasks/main.yml
+++ b/ansible/playbooks/roles/logging/tasks/main.yml
@@ -10,8 +10,8 @@
run_once: true
no_log: true # contains sensitive data
-- name: Install and configure OpenDistro for Elasticsearch
+- name: Install and configure OpenSearch
import_role:
- name: opendistro_for_elasticsearch
+ name: opensearch
vars:
- specification: "{{ logging_vars.specification }}" # to override opendistro_for_elasticsearch specification
+ specification: "{{ logging_vars.specification }}" # to override OpenSearch specification
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml
deleted file mode 100644
index 4bed42d55f..0000000000
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-es.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Install elasticsearch-oss packages
- package:
- name: "{{ _packages[ansible_os_family] }}"
- state: present
- vars:
- _packages:
- Debian:
- - elasticsearch-oss={{ versions[ansible_os_family].elasticsearch_oss }}
- RedHat:
- - elasticsearch-oss-{{ versions[ansible_os_family].elasticsearch_oss }}
- register: install_elasticsearch_package
- module_defaults:
- yum: { lock_timeout: "{{ yum_lock_timeout }}" }
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml
deleted file mode 100644
index d38b2ebcd3..0000000000
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/install-opendistro.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# NOTE: Keep in mind compatibility matrix for Open Distro https://opendistro.github.io/for-elasticsearch-docs/docs/install/plugins/#plugin-compatibility
-- name: Install opendistro-* packages
- package:
- name: "{{ _packages[ansible_os_family] }}"
- state: present
- vars:
- _packages:
- Debian:
- - opendistro-alerting={{ versions[ansible_os_family].opendistro }}
- - opendistro-index-management={{ versions[ansible_os_family].opendistro }}
- - opendistro-job-scheduler={{ versions[ansible_os_family].opendistro }}
- - opendistro-performance-analyzer={{ versions[ansible_os_family].opendistro }}
- - opendistro-security={{ versions[ansible_os_family].opendistro }}
- - opendistro-sql={{ versions[ansible_os_family].opendistro }}
- RedHat:
- - opendistro-alerting-{{ versions[ansible_os_family].opendistro }}
- - opendistro-index-management-{{ versions[ansible_os_family].opendistro }}
- - opendistro-job-scheduler-{{ versions[ansible_os_family].opendistro }}
- - opendistro-performance-analyzer-{{ versions[ansible_os_family].opendistro }}
- - opendistro-security-{{ versions[ansible_os_family].opendistro }}
- - opendistro-sql-{{ versions[ansible_os_family].opendistro }}
- register: install_opendistro_packages
- module_defaults:
- yum: { lock_timeout: "{{ yum_lock_timeout }}" }
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml
deleted file mode 100644
index 6860c69c17..0000000000
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Configure OS limits (open files, processes and locked-in-memory address space)
- pam_limits:
- domain: elasticsearch
- limit_type: "{{ item.limit_type }}"
- limit_item: "{{ item.limit_item }}"
- value: "{{ item.value }}"
- loop:
- - { limit_type: 'soft', limit_item: 'nofile', value: 65536 }
- - { limit_type: 'hard', limit_item: 'nofile', value: 65536 }
- - { limit_type: 'soft', limit_item: 'nproc', value: 65536 }
- - { limit_type: 'hard', limit_item: 'nproc', value: 65536 }
- - { limit_type: 'soft', limit_item: 'memlock', value: unlimited }
- - { limit_type: 'hard', limit_item: 'memlock', value: unlimited }
-
-- include_tasks: install-es.yml
-
-- include_tasks: install-opendistro.yml
-
-- name: Include configuration tasks
- include_tasks: configure-es.yml
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml b/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml
deleted file mode 100644
index 917c2e52d7..0000000000
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/patch-log4j.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Log4j patch
- block:
- - name: "opendistro_for_elasticsearch : Log4j patch | Get archive"
- include_role:
- name: download
- tasks_from: download_file
- vars:
- file_name: "{{ log4j_file_name }}"
-
- - name: Log4j patch | Extract archive
- unarchive:
- dest: /tmp/
- src: "{{ download_directory }}/{{ log4j_file_name }}"
- remote_src: true
- list_files: true
- register: unarchive_list_files
-
- - name: Log4j patch | Copy new jars
- register: log4j_patch
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: elasticsearch
- group: root
- mode: u=rw,g=r,o=
- remote_src: true
- loop:
- - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/performance-analyzer-rca/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_api }}", dest: /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/performance-analyzer-rca/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_core }}", dest: /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/ }
- - { src: "{{ download_directory }}/{{ log4j_slfj_impl }}", dest: /usr/share/elasticsearch/plugins/opendistro_security/ }
- vars:
- log4j_api: "{{ unarchive_list_files.files | select('contains', 'log4j-api-2.17.1.jar') | first }}"
- log4j_core: "{{ unarchive_list_files.files | select('contains', 'log4j-core-2.17.1.jar') | first }}"
- log4j_slfj_impl: "{{ unarchive_list_files.files | select('contains', 'log4j-slf4j-impl-2.17.1.jar') | first }}"
-
- - name: Log4j patch - cleanup
- block:
- - name: Log4j patch | Remove old jars
- file:
- state: absent
- path: "{{ item }}"
- loop:
- - /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/log4j-api-2.13.0.jar
- - /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/performance-analyzer-rca/lib/log4j-core-2.13.0.jar
- - /usr/share/elasticsearch/performance-analyzer-rca/lib/log4j-api-2.13.0.jar
- - /usr/share/elasticsearch/performance-analyzer-rca/lib/log4j-core-2.13.0.jar
- - /usr/share/elasticsearch/lib/log4j-api-2.11.1.jar
- - /usr/share/elasticsearch/lib/log4j-core-2.11.1.jar
- - /usr/share/elasticsearch/plugins/opendistro_security/log4j-slf4j-impl-2.11.1.jar
-
- - name: Log4j patch | Delete temporary dir
- file:
- dest: "{{ download_directory }}/{{ _archive_root_dir }}"
- state: absent
- vars:
- _archive_root_dir: >-
- {{ unarchive_list_files.files | first | dirname }}
-
-- name: Restart opendistro-performance-analyzer service
- systemd:
- name: opendistro-performance-analyzer
- state: restarted
- when: log4j_patch.changed
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml b/ansible/playbooks/roles/opensearch/defaults/main.yml
similarity index 70%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml
rename to ansible/playbooks/roles/opensearch/defaults/main.yml
index cbde5b2a67..a1765a6b29 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/defaults/main.yml
+++ b/ansible/playbooks/roles/opensearch/defaults/main.yml
@@ -1,18 +1,17 @@
---
# This file is meant to be also used by upgrade role
-
-versions:
- RedHat:
- elasticsearch_oss: "7.10.2"
- opendistro: "1.13.*"
- Debian:
- elasticsearch_oss: "7.10.2"
- opendistro: "1.13.*"
+file_name_version:
+ opensearch:
+ x86_64: opensearch-1.2.4-linux-x64.tar.gz
+ aarch64: opensearch-1.2.4-linux-arm64.tar.gz
+ opensearch_perftop:
+ x86_64: opensearch-perf-top-1.2.0.0-linux-x64.zip
+ # Perftop is not supported on ARM (https://github.com/opensearch-project/perftop/issues/26)
certificates:
dirs:
- certs: /etc/elasticsearch
- ca_key: /etc/elasticsearch/private
- csr: /etc/elasticsearch/csr
+ certs: /usr/share/opensearch/config
+ ca_key: /usr/share/opensearch/config
+ csr: /usr/share/opensearch/config
dn_attributes_order: ['CN', 'OU', 'O', 'L', 'S', 'C', 'DC']
files:
demo:
@@ -20,12 +19,12 @@ certificates:
cert: root-ca.pem
admin:
cert: kirk.pem
- key: kirk-key.pem
+ key: kirk-key.pem
node:
cert: esnode.pem
- key: esnode-key.pem
- opendistro_security:
- allow_unsafe_democertificates: false # if 'false' all demo files must be removed to start Elasticsearch
+ key: esnode-key.pem
+ opensearch_security:
+ allow_unsafe_democertificates: false # if 'false' all demo files must be removed to start OpenSearch
common:
subject: &common-subject
O: Epiphany
@@ -58,6 +57,6 @@ certificates:
key:
filename: epiphany-node-{{ ansible_nodename }}-key.pem
ports:
- http: 9200 # defaults to range but we want static port
- transport: 9300 # defaults to range but we want static port
+ http: 9200
+ transport: 9300
log4j_file_name: apache-log4j-2.17.1-bin.tar.gz
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/meta/main.yml b/ansible/playbooks/roles/opensearch/meta/main.yml
similarity index 100%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/meta/main.yml
rename to ansible/playbooks/roles/opensearch/meta/main.yml
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml b/ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml
similarity index 53%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml
rename to ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml
index f60cf05e27..c75697d47d 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/configure-es.yml
+++ b/ansible/playbooks/roles/opensearch/tasks/configure-opensearch.yml
@@ -3,72 +3,72 @@
- name: Ensure snapshot folder exists
file:
- path: "{{ specification.paths.repo }}/"
+ path: "{{ specification.paths.opensearch_snapshots_dir }}/"
state: directory
- owner: elasticsearch
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
mode: u=rwx,go=
- name: Provide JVM configuration file
template:
- backup: yes
+ backup: true
src: jvm.options.j2
- dest: /etc/elasticsearch/jvm.options
- owner: root
- group: elasticsearch
+ dest: "{{ specification.paths.opensearch_conf_dir }}/jvm.options"
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
mode: ug=rw,o=
register: change_jvm_config
vars:
xmx: "{{ specification.jvm_options.Xmx }}"
- name: Generate certificates
- when: not is_upgrade_run # in upgrade mode certs are required at early stage and should be already generated
+ when: not is_upgrade_run # in upgrade mode certs are required at early stage and should be already generated
block:
- # Install requirements for Ansible certificate modules
- - include_role:
+ - name: Install requirements for Ansible certificate modules
+ include_role:
name: certificate
tasks_from: install-packages.yml
- include_tasks: generate-certs.yml
-- name: Provide Elasticsearch configuration file
+- name: Provide OpenSearch configuration file
template:
- backup: yes
- src: elasticsearch.yml.j2
- dest: /etc/elasticsearch/elasticsearch.yml
- owner: root
- group: elasticsearch
+ backup: true
+ src: opensearch.yml.j2
+ dest: "{{ specification.paths.opensearch_conf_dir }}/opensearch.yml"
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
mode: ug=rw,o=
register: change_config
vars:
node_cert_filename:
http: >-
- {{ existing_es_config['opendistro_security.ssl.http.pemcert_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.http.pemcert_filepath'] if (is_upgrade_run) else
certificates.files.node.cert.filename }}
transport: >-
- {{ existing_es_config['opendistro_security.ssl.transport.pemcert_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.transport.pemcert_filepath'] if (is_upgrade_run) else
certificates.files.node.cert.filename }}
node_key_filename:
http: >-
- {{ existing_es_config['opendistro_security.ssl.http.pemkey_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.http.pemkey_filepath'] if (is_upgrade_run) else
certificates.files.node.key.filename }}
transport: >-
- {{ existing_es_config['opendistro_security.ssl.transport.pemkey_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.transport.pemkey_filepath'] if (is_upgrade_run) else
certificates.files.node.key.filename }}
root_ca_cert_filename:
http: >-
- {{ existing_es_config['opendistro_security.ssl.http.pemtrustedcas_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.http.pemtrustedcas_filepath'] if (is_upgrade_run) else
certificates.files.root_ca.cert.filename }}
transport: >-
- {{ existing_es_config['opendistro_security.ssl.transport.pemtrustedcas_filepath'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.ssl.transport.pemtrustedcas_filepath'] if (is_upgrade_run) else
certificates.files.root_ca.cert.filename }}
_epiphany_subjects:
admin: "{{ certificates.files.admin.cert.subject }}"
- node: "{{ certificates.files.node.cert.subject }}"
+ node: "{{ certificates.files.node.cert.subject }}"
_epiphany_dn_attributes:
admin: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.admin.keys()) }}"
- node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}"
- _epiphany_DNs:
+ node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}"
+ _epiphany_dns:
admin: >-
{{ _epiphany_dn_attributes.admin | zip(_epiphany_dn_attributes.admin | map('extract', _epiphany_subjects.admin))
| map('join','=') | join(',') }}
@@ -76,8 +76,8 @@
{{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node))
| map('join','=') | join(',') }}
admin_dn: >-
- {{ existing_es_config['opendistro_security.authcz.admin_dn'] if (is_upgrade_run) else
- [ _epiphany_DNs.admin ] }}
+ {{ existing_es_config['opensearch_security.authcz.admin_dn'] if (is_upgrade_run) else
+ [ _epiphany_dns.admin ] }}
_epiphany_nodes_dn: >-
{%- if groups[current_group_name] | length > 1 -%}
{%- set nodes_to_iterate = ansible_play_hosts_all -%}
@@ -86,70 +86,58 @@
{%- endif -%}
{%- for node in nodes_to_iterate -%}
{%- if loop.first -%}[{%- endif -%}
- '{{ _epiphany_DNs.node.split(',') | map('regex_replace', '^CN=.+$', 'CN=' + hostvars[node].ansible_nodename) | join(',') }}'
+ '{{ _epiphany_dns.node.split(',') | map('regex_replace', '^CN=.+$', 'CN=' + hostvars[node].ansible_nodename) | join(',') }}'
{%- if not loop.last -%},{%- else -%}]{%- endif -%}
{%- endfor -%}
nodes_dn: >-
- {{ existing_es_config['opendistro_security.nodes_dn'] if (is_upgrade_run) else
+ {{ existing_es_config['opensearch_security.nodes_dn'] if (is_upgrade_run) else
_epiphany_nodes_dn }}
- opendistro_security_allow_unsafe_democertificates: "{{ certificates.files.demo.opendistro_security.allow_unsafe_democertificates }}"
-
- http_port: "{{ is_upgrade_run | ternary(existing_es_config['http.port'], ports.http) }}"
+ opensearch_security_allow_unsafe_democertificates: "{{ certificates.files.demo.opensearch_security.allow_unsafe_democertificates }}"
+ http_port: "{{ is_upgrade_run | ternary(existing_es_config['http.port'], ports.http) }}"
transport_port: "{{ is_upgrade_run | ternary(existing_es_config['transport.port'], ports.transport) }}"
-# When 'opendistro_security.allow_unsafe_democertificates' is set to 'false' all demo certificate files must be removed,
-# otherwise elasticsearch service doesn't start.
+# When 'opensearch_security.allow_unsafe_democertificates' is set to 'false' all demo certificate files must be removed,
+# otherwise the opensearch service doesn't start.
# For apply mode, demo certificate files are removed based only on their names. For upgrade mode,
# public key fingerprints are checked to protect against unintentional deletion (what takes additional time).
- name: Remove demo certificate files
include_tasks:
file: "{{ is_upgrade_run | ternary('remove-known-demo-certs.yml', 'remove-demo-certs.yml') }}"
- when: not certificates.files.demo.opendistro_security.allow_unsafe_democertificates
-
-- name: Include log4j patch
- include_tasks: patch-log4j.yml
+ when: not certificates.files.demo.opensearch_security.allow_unsafe_democertificates
-- name: Restart elasticsearch service
+- name: Restart OpenSearch service
systemd:
- name: elasticsearch
+ name: opensearch
state: restarted
- register: restart_elasticsearch
+ enabled: true
+ register: restart_opensearch
when: change_config.changed
- or log4j_patch.changed
or change_jvm_config.changed
- or install_elasticsearch_package.changed
- or (install_opendistro_packages is defined and install_opendistro_packages.changed)
-
-- name: Enable and start elasticsearch service
- systemd:
- name: elasticsearch
- state: started
- enabled: yes
- name: Change default users
when: not is_upgrade_run
block:
- - name: Wait for elasticsearch service to start up
- when: restart_elasticsearch.changed
+ - name: Wait for opensearch service to start up
+ when: restart_opensearch.changed
wait_for:
- port: 9200
+ port: "{{ ports.http }}"
host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"
- name: Set helper facts
set_fact:
- elasticsearch_endpoint: https://{{ ansible_default_ipv4.address }}:9200
+ opensearch_endpoint: https://{{ ansible_default_ipv4.address }}:{{ ports.http }}
vars:
uri_template: &uri
client_cert: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert.filename }}"
- client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key.filename }}"
+ client_key: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key.filename }}"
validate_certs: false
body_format: json
- name: Check if default admin user exists
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/admin"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/admin"
method: GET
# 404 code is used there as someone can remove admin user on its own.
status_code: [200, 404]
@@ -159,10 +147,29 @@
delay: 1
run_once: true
- - name: Set OpenDistro admin password
+ - name: Create OpenSearch admin user
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/admin"
+ method: PUT
+ status_code: [200]
+ body:
+ password: "{{ specification.admin_password }}"
+ reserved: "true"
+ backend_roles:
+ - "admin"
+ description: "Admin user"
+ register: uri_response
+ until: uri_response is success
+ retries: 5
+ delay: 1
+ run_once: true
+ when: admin_check_response.status == 404
+
+ - name: Set OpenSearch admin password
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/"
method: PATCH
status_code: [200]
body:
@@ -184,20 +191,35 @@
- name: Check if default kibanaserver user exists
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/kibanaserver"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/kibanaserver"
method: GET
- status_code: [200]
+ status_code: [200, 404]
register: kibanaserver_check_response
- until: kibanaserver_check_response is success
- retries: 60
+ when: specification.kibanaserver_user_active
+
+ - name: Create default kibanaserver user
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/kibanaserver"
+ method: PUT
+ status_code: [200]
+ body:
+ password: "{{ specification.kibanaserver_password }}"
+ reserved: "true"
+ description: "Demo OpenSearch Dashboards user"
+ register: uri_response
+ until: uri_response is success
+ retries: 5
delay: 1
run_once: true
- when: specification.kibanaserver_user_active
+ when:
+ - kibanaserver_check_response is defined
+ - kibanaserver_check_response.status == 404
- - name: Set OpenDistro kibanaserver password
+ - name: Set kibanaserver user password
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/"
method: PATCH
status_code: [200]
body:
@@ -206,31 +228,48 @@
value:
password: "{{ specification.kibanaserver_password }}"
reserved: "true"
- description: "Kibana server user"
+ description: "Demo OpenSearch Dashboards user"
register: uri_response
until: uri_response is success
retries: 15
delay: 1
run_once: true
- when: specification.kibanaserver_user_active
+ when:
+ - kibanaserver_check_response is defined
+ - kibanaserver_check_response.status == 200
- name: Check if default logstash user exists
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/logstash"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/logstash"
method: GET
- status_code: [200]
+ status_code: [200, 404]
register: logstash_check_response
- until: logstash_check_response is success
- retries: 60
+ when: specification.logstash_user_active
+
+ - name: Create default logstash user
+ uri:
+ <<: *uri
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/logstash"
+ method: PUT
+ status_code: [200]
+ body:
+ password: "{{ specification.logstash_password }}"
+ reserved: "true"
+ description: "OpenSearch logstash user"
+ register: uri_response
+ until: uri_response is success
+ retries: 5
delay: 1
run_once: true
- when: specification.logstash_user_active
+ when:
+ - logstash_check_response is defined
+ - logstash_check_response.status == 404
- - name: Set OpenDistro logstash password
+ - name: Set OpenSearch logstash user password
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/"
method: PATCH
status_code: [200]
body:
@@ -241,18 +280,20 @@
reserved: "true"
backend_roles:
- "logstash"
- description: "Logstash user"
+ description: "OpenSearch logstash user"
register: uri_response
until: uri_response is success
retries: 3
delay: 5
run_once: true
- when: specification.logstash_user_active
+ when:
+ - logstash_check_response is defined
+      - logstash_check_response.status == 200
- - name: Remove OpenDistro demo users
+ - name: Remove OpenSearch demo users
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_opendistro/_security/api/internalusers/{{ item }}"
+ url: "{{ opensearch_endpoint }}/_opendistro/_security/api/internalusers/{{ item }}"
method: DELETE
status_code: [200, 404]
register: uri_response
diff --git a/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml b/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml
new file mode 100644
index 0000000000..113fdd1797
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch/tasks/configure-sysctl.yml
@@ -0,0 +1,12 @@
+---
+- name: Set open files limit in sysctl.conf
+ sysctl:
+ name: fs.file-max
+ value: "65536"
+ state: present
+
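+# OpenSearch, like Elasticsearch, memory-maps index files; the documented minimum for vm.max_map_count is 262144, well above the typical kernel default.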
+- name: Set maximum number of memory map areas limit in sysctl.conf
+ sysctl:
+ name: vm.max_map_count
+ value: "262144"
+ state: present
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml b/ansible/playbooks/roles/opensearch/tasks/generate-certs.yml
similarity index 77%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml
rename to ansible/playbooks/roles/opensearch/tasks/generate-certs.yml
index 898d6cbe35..e32e40794f 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/generate-certs.yml
+++ b/ansible/playbooks/roles/opensearch/tasks/generate-certs.yml
@@ -5,40 +5,37 @@
file:
state: directory
path: "{{ certificates.dirs.ca_key }}"
- owner: root
- group: elasticsearch
- mode: u=rwx,g=rx,o= # elasticsearch.service requires 'rx' for group
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
+ mode: u=rwx,g=rwx,o=
# csr files are kept only for idempotency
- name: Create directory for CSR files
file:
state: directory
path: "{{ certificates.dirs.csr }}"
- owner: root
- group: elasticsearch
- mode: u=rwx,g=rx,o= # CSR file doesn't contain private key
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
+ mode: u=rwx,g=rwx,o= # CSR file doesn't contain private key
- name: Generate keys and certificates on first node
when: inventory_hostname == ansible_play_hosts_all[0]
module_defaults:
community.crypto.openssl_privatekey:
- size: 2048 # based on ODFE docs
+ size: 2048 # based on ODFE docs
type: RSA
mode: u=rw,go=
- owner: root
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
format: pkcs8
community.crypto.openssl_csr:
mode: u=rw,g=r,o=
- owner: root
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
use_common_name_for_san: false
community.crypto.x509_certificate:
selfsigned_digest: sha256
ownca_digest: sha256
mode: u=rw,g=r,o=
- owner: root
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
block:
# --- Generate CA root certificate ---
@@ -48,10 +45,10 @@
return_content: false
register: ca_key
- - name: Generate CSR for root CA # based on ODFE demo cert (root-ca.pem)
+ - name: Generate CSR for root CA # based on ODFE demo cert (root-ca.pem)
community.crypto.openssl_csr:
path: "{{ certificates.dirs.csr }}/{{ certificates.files.root_ca.cert.filename | regex_replace('\\..+$', '.csr') }}"
- privatekey_path: "{{ ca_key.filename }}" # 'filename' contains full path
+ privatekey_path: "{{ ca_key.filename }}" # 'filename' contains full path
CN: "{{ certificates.files.root_ca.cert.subject.CN }}"
OU: "{{ certificates.files.root_ca.cert.subject.OU }}"
O: "{{ certificates.files.root_ca.cert.subject.O }}"
@@ -80,14 +77,14 @@
- name: Generate private key for admin certificate
community.crypto.openssl_privatekey:
path: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key.filename }}"
- format: pkcs8 # specified explicitly since this format is required
+ format: pkcs8 # specified explicitly since this format is required
return_content: false
register: admin_key
- - name: Generate CSR for admin certificate # based on ODFE demo cert (kirk.pem)
+ - name: Generate CSR for admin certificate # based on ODFE demo cert (kirk.pem)
community.crypto.openssl_csr:
path: "{{ certificates.dirs.csr }}/{{ certificates.files.admin.cert.filename | regex_replace('\\..+$', '.csr') }}"
- privatekey_path: "{{ admin_key.filename }}" # 'filename' contains full path
+ privatekey_path: "{{ admin_key.filename }}" # 'filename' contains full path
CN: "{{ certificates.files.admin.cert.subject.CN }}"
OU: "{{ certificates.files.admin.cert.subject.OU }}"
O: "{{ certificates.files.admin.cert.subject.O }}"
@@ -122,14 +119,14 @@
module_defaults:
copy:
owner: root
- group: elasticsearch
+ group: "{{ specification.opensearch_os_group }}"
block:
- name: Get certificate files from the first host
slurp:
src: "{{ item }}"
delegate_to: "{{ ansible_play_hosts_all[0] }}"
register: slurp_certs
- no_log: true # sensitive data
+ no_log: true # sensitive data
loop:
- "{{ certificates.dirs.ca_key }}/{{ certificates.files.root_ca.key.filename }}"
- "{{ certificates.dirs.certs }}/{{ certificates.files.root_ca.cert.filename }}"
@@ -139,29 +136,29 @@
- name: Copy CA private key to other hosts
copy:
content: "{{ slurp_certs.results[0].content | b64decode }}"
- dest: "{{ certificates.dirs.ca_key }}/{{ certificates.files.root_ca.key.filename }}"
+ dest: "{{ certificates.dirs.ca_key }}/{{ certificates.files.root_ca.key.filename }}"
mode: u=rw,go=
- no_log: true # sensitive data
+ no_log: true # sensitive data
- name: Copy root CA to other hosts
copy:
content: "{{ slurp_certs.results[1].content | b64decode }}"
- dest: "{{ certificates.dirs.certs }}/{{ certificates.files.root_ca.cert.filename }}"
+ dest: "{{ certificates.dirs.certs }}/{{ certificates.files.root_ca.cert.filename }}"
mode: u=rw,g=r,o=
- name: Copy admin private key to other hosts
copy:
content: "{{ slurp_certs.results[2].content | b64decode }}"
- dest: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key.filename }}"
+ dest: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.key.filename }}"
mode: u=rw,go=
- no_log: true # sensitive data
+ no_log: true # sensitive data
- name: Copy admin certificate to other hosts
copy:
content: "{{ slurp_certs.results[3].content | b64decode }}"
- dest: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert.filename }}"
+ dest: "{{ certificates.dirs.certs }}/{{ certificates.files.admin.cert.filename }}"
mode: u=rw,g=r,o=
- no_log: true # sensitive data
+ no_log: true # sensitive data
# --- Generate node certificate (each node has its own) ---
@@ -171,16 +168,16 @@
format: pkcs8
size: 2048
type: RSA
- mode: u=rw,g=r,o= # elasticsearch.service requires 'r' for group
- owner: root
- group: elasticsearch
+ mode: u=rw,g=r,o=
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
return_content: false
register: node_key
-- name: Generate CSR for node certificate # based on ODFE demo cert (esnode.pem)
+- name: Generate CSR for node certificate # based on ODFE demo cert (esnode.pem)
community.crypto.openssl_csr:
path: "{{ certificates.dirs.csr }}/{{ certificates.files.node.cert.filename | regex_replace('\\..+$', '.csr') }}"
- privatekey_path: "{{ node_key.filename }}" # 'filename' contains full path
+ privatekey_path: "{{ node_key.filename }}" # 'filename' contains full path
CN: "{{ certificates.files.node.cert.subject.CN }}"
OU: "{{ certificates.files.node.cert.subject.OU }}"
O: "{{ certificates.files.node.cert.subject.O }}"
@@ -199,8 +196,8 @@
subjectAltName: "{{ _dns_list + [ 'IP:' + ansible_default_ipv4.address ] }}"
use_common_name_for_san: false
mode: u=rw,g=r,o=
- owner: root
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
register: node_csr
vars:
_unique_hostnames: "{{ [ansible_hostname, ansible_nodename, ansible_fqdn] | unique }}"
@@ -217,5 +214,5 @@
ownca_not_after: "{{ certificates.files.node.cert.ownca_not_after }}"
ownca_digest: sha256
mode: u=rw,go=r
- owner: root
- group: elasticsearch
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
diff --git a/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml b/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml
new file mode 100644
index 0000000000..6ed87b4157
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch/tasks/install-opensearch.yml
@@ -0,0 +1,78 @@
+---
+- name: Download OpenSearch
+ include_role:
+ name: download
+ tasks_from: download_file
+ vars:
+ file_name: "{{ file_name_version.opensearch[ansible_architecture] }}"
+
+- name: Download PerfTop
+ include_role:
+ name: download
+ tasks_from: download_file
+ vars:
+ file_name: "{{ file_name_version.opensearch_perftop[ansible_architecture] }}"
+ when: ansible_architecture == "x86_64" # Perftop is not yet supported on ARM (https://github.com/opensearch-project/perftop/issues/26)
+
+- name: Prepare OS group, user and directories
+ when: not is_upgrade_run
+ block:
+ - name: Ensure OpenSearch service OS group exists
+ group:
+ name: "{{ specification.opensearch_os_group }}"
+ state: present
+
+ - name: Ensure OpenSearch service OS user exists
+ user:
+ name: "{{ specification.opensearch_os_user }}"
+ state: present
+ shell: /bin/bash
+ groups: "{{ specification.opensearch_os_group }}"
+ home: "{{ specification.paths.opensearch_home }}"
+ create_home: true
+
+ - name: Ensure PerfTop directory structure exists
+ file:
+ path: "{{ specification.paths.opensearch_perftop_dir }}"
+ state: directory
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
+ mode: u=rwx,go=rx
+ recurse: true
+ when: ansible_architecture == "x86_64" # Perftop is not yet supported on ARM (https://github.com/opensearch-project/perftop/issues/26)
+
+ - name: Ensure directory structure exists
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ specification.opensearch_os_user }}"
+ group: "{{ specification.opensearch_os_group }}"
+ mode: u=rwx,go=rx
+ loop:
+ - "{{ specification.paths.opensearch_log_dir }}"
+ - "{{ specification.paths.opensearch_conf_dir }}"
+ - "{{ specification.paths.opensearch_data_dir }}"
+ - "{{ certificates.dirs.certs }}"
+
+- name: Extract OpenSearch tar file
+ unarchive:
+ src: "{{ download_directory }}/{{ file_name_version.opensearch[ansible_architecture] }}"
+ dest: "{{ specification.paths.opensearch_home }}"
+ owner: "{{ specification.opensearch_os_user }}"
+ remote_src: true
+ extra_opts:
+ - --strip-components=1
+
+- name: Extract OpenSearch PerfTop tar file
+ unarchive:
+ src: "{{ download_directory }}/{{ file_name_version.opensearch_perftop[ansible_architecture] }}"
+ dest: "{{ specification.paths.opensearch_perftop_dir }}"
+ owner: "{{ specification.opensearch_os_user }}"
+ remote_src: true
+ when: ansible_architecture == "x86_64" # Perftop is not yet supported on ARM (https://github.com/opensearch-project/perftop/issues/26)
+
+- name: Create opensearch.service unit file
+ template:
+ src: roles/opensearch/templates/opensearch.service.j2
+ dest: "/etc/systemd/system/opensearch.service"
+ mode: u=rw,go=r
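The installation tasks above take every path and account name from the `specification` document, which is not part of this diff. A minimal sketch of the structure they assume (the concrete values are illustrative only, not taken from the Epiphany defaults):

    specification:
      opensearch_os_user: opensearch
      opensearch_os_group: opensearch
      paths:
        opensearch_home: /usr/share/opensearch
        opensearch_conf_dir: /usr/share/opensearch/config
        opensearch_data_dir: /var/lib/opensearch
        opensearch_log_dir: /var/log/opensearch
        opensearch_perftop_dir: /usr/share/opensearch/perftop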
diff --git a/ansible/playbooks/roles/opensearch/tasks/main.yml b/ansible/playbooks/roles/opensearch/tasks/main.yml
new file mode 100644
index 0000000000..9fdaff2c44
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+- name: Configure OS limits (open files, processes and locked-in-memory address space)
+ pam_limits:
+ domain: opensearch
+ limit_type: "{{ item.limit_type }}"
+ limit_item: "{{ item.limit_item }}"
+ value: "{{ item.value }}"
+ loop:
+ - {limit_type: 'soft', limit_item: 'nofile', value: 65536}
+ - {limit_type: 'hard', limit_item: 'nofile', value: 65536}
+ - {limit_type: 'soft', limit_item: 'nproc', value: 65536}
+ - {limit_type: 'hard', limit_item: 'nproc', value: 65536}
+ - {limit_type: 'soft', limit_item: 'memlock', value: unlimited}
+ - {limit_type: 'hard', limit_item: 'memlock', value: unlimited}
+
+- name: Tune the system settings
+ include_tasks: configure-sysctl.yml
+
+- name: Include installation tasks
+ include_tasks: install-opensearch.yml
+
+- name: Include configuration tasks
+ include_tasks: configure-opensearch.yml
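main.yml also includes configure-sysctl.yml, which is not part of this patch. For OpenSearch, as for Elasticsearch, the kernel setting that file has to cover is the mmap count limit; a minimal sketch under that assumption:

    ---
    - name: Set vm.max_map_count required by OpenSearch
      sysctl:
        name: vm.max_map_count
        value: "262144"
        sysctl_set: true
        state: present
        reload: true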
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-demo-certs.yml b/ansible/playbooks/roles/opensearch/tasks/remove-demo-certs.yml
similarity index 100%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-demo-certs.yml
rename to ansible/playbooks/roles/opensearch/tasks/remove-demo-certs.yml
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-known-demo-certs.yml b/ansible/playbooks/roles/opensearch/tasks/remove-known-demo-certs.yml
similarity index 73%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-known-demo-certs.yml
rename to ansible/playbooks/roles/opensearch/tasks/remove-known-demo-certs.yml
index 55e0f8d07d..077adc1211 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/tasks/remove-known-demo-certs.yml
+++ b/ansible/playbooks/roles/opensearch/tasks/remove-known-demo-certs.yml
@@ -6,12 +6,12 @@
vars:
demo_files:
certs:
- - { filename: "{{ certificates.files.demo.admin.cert }}", public_key_sha1_fingerprint: 53:01:c4:6a:c8:9c:dd:ab:1d:2d:d9:9a:a9:c6:01:43:38:66:2c:ee }
- - { filename: "{{ certificates.files.demo.node.cert }}", public_key_sha1_fingerprint: 6e:d8:94:2c:4a:a1:d2:b4:d4:5e:65:0f:66:d6:a9:35:23:a2:77:52 }
- - { filename: "{{ certificates.files.demo.root_ca.cert }}", public_key_sha1_fingerprint: 4c:8a:cc:d1:9f:a5:23:6f:4a:9d:d3:bb:8f:0d:05:ab:5b:e3:f4:59 }
+ - {filename: "{{ certificates.files.demo.admin.cert }}", public_key_sha1_fingerprint: 53:01:c4:6a:c8:9c:dd:ab:1d:2d:d9:9a:a9:c6:01:43:38:66:2c:ee}
+ - {filename: "{{ certificates.files.demo.node.cert }}", public_key_sha1_fingerprint: 6e:d8:94:2c:4a:a1:d2:b4:d4:5e:65:0f:66:d6:a9:35:23:a2:77:52}
+ - {filename: "{{ certificates.files.demo.root_ca.cert }}", public_key_sha1_fingerprint: 4c:8a:cc:d1:9f:a5:23:6f:4a:9d:d3:bb:8f:0d:05:ab:5b:e3:f4:59}
keys:
- - { filename: "{{ certificates.files.demo.admin.key }}", public_key_sha1_fingerprint: 53:01:c4:6a:c8:9c:dd:ab:1d:2d:d9:9a:a9:c6:01:43:38:66:2c:ee }
- - { filename: "{{ certificates.files.demo.node.key }}", public_key_sha1_fingerprint: 6e:d8:94:2c:4a:a1:d2:b4:d4:5e:65:0f:66:d6:a9:35:23:a2:77:52 }
+ - {filename: "{{ certificates.files.demo.admin.key }}", public_key_sha1_fingerprint: 53:01:c4:6a:c8:9c:dd:ab:1d:2d:d9:9a:a9:c6:01:43:38:66:2c:ee}
+ - {filename: "{{ certificates.files.demo.node.key }}", public_key_sha1_fingerprint: 6e:d8:94:2c:4a:a1:d2:b4:d4:5e:65:0f:66:d6:a9:35:23:a2:77:52}
block:
- name: Check if known demo certificates exist
stat:
@@ -60,5 +60,5 @@
label: "{{ item.filename }}"
vars:
_query: "[*].{ filename: item, public_key_sha1_fingerprint: public_key_fingerprints.sha1 }"
- _demo_certs: "{{ _demo_certs_info.results | json_query(_query) }}"
+ _demo_certs: "{{ _demo_certs_info.results | json_query(_query) }}"
_demo_cert_keys: "{{ _demo_cert_keys_info.results | json_query(_query) }}"
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2 b/ansible/playbooks/roles/opensearch/templates/jvm.options.j2
similarity index 81%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2
rename to ansible/playbooks/roles/opensearch/templates/jvm.options.j2
index e91e6b6635..75beba6b52 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/jvm.options.j2
+++ b/ansible/playbooks/roles/opensearch/templates/jvm.options.j2
@@ -51,7 +51,7 @@
14-:-XX:InitiatingHeapOccupancyPercent=30
## JVM temporary directory
--Djava.io.tmpdir=${ES_TMPDIR}
+-Djava.io.tmpdir=${OPENSEARCH_TMPDIR}
## heap dumps
@@ -61,25 +61,20 @@
# specify an alternative path for heap dumps; ensure the directory exists and
# has sufficient space
--XX:HeapDumpPath=/var/lib/elasticsearch
+-XX:HeapDumpPath=/var/lib/opensearch
# specify an alternative path for JVM fatal error logs
--XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log
+-XX:ErrorFile=/var/log/opensearch/hs_err_pid%p.log
## JDK 8 GC logging
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
-8:-Xloggc:/var/log/elasticsearch/gc.log
+8:-Xloggc:/var/log/opensearch/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging
-9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
-
-## OpenDistro Performance Analyzer
--Dclk.tck=100
--Djdk.attach.allowAttachSelf=true
--Djava.security.policy=file:///usr/share/elasticsearch/plugins/opendistro_performance_analyzer/pa_config/es_security.policy
+9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/opensearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
diff --git a/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2 b/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2
new file mode 100644
index 0000000000..a886e79dd1
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch/templates/opensearch.service.j2
@@ -0,0 +1,51 @@
+[Unit]
+Description=OpenSearch
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+RuntimeDirectory=opensearch
+PrivateTmp=true
+
+WorkingDirectory={{ specification.paths.opensearch_home }}
+
+User={{ specification.opensearch_os_user }}
+Group={{ specification.opensearch_os_group }}
+
+ExecStart={{ specification.paths.opensearch_home }}/bin/opensearch -p {{ specification.paths.opensearch_home }}/opensearch.pid -q
+
+StandardOutput=journal
+StandardError=inherit
+
+# Specifies the maximum file descriptor number that can be opened by this process
+LimitNOFILE=65536
+
+# Specifies the memory lock settings
+LimitMEMLOCK=infinity
+
+# Specifies the maximum number of processes
+LimitNPROC=4096
+
+# Specifies the maximum size of virtual memory
+LimitAS=infinity
+
+# Specifies the maximum file size
+LimitFSIZE=infinity
+
+# Disable timeout logic and wait until process is stopped
+TimeoutStopSec=0
+
+# SIGTERM signal is used to stop the Java process
+KillSignal=SIGTERM
+
+# Send the signal only to the JVM rather than its control group
+KillMode=process
+
+# Java process is never killed
+SendSIGKILL=no
+
+# When a JVM receives a SIGTERM signal it exits with code 143
+SuccessExitStatus=143
+
+[Install]
+WantedBy=multi-user.target
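The role only templates this unit file; starting the service is presumably handled by configure-opensearch.yml, which is not shown here. If it had to be done right after templating, the usual pattern would be (a sketch only):

    - name: Reload systemd and start OpenSearch
      systemd:
        name: opensearch
        state: started
        enabled: true
        daemon_reload: true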
diff --git a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2 b/ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2
similarity index 54%
rename from ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2
rename to ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2
index 0214fcc7d0..2a99a3bb03 100644
--- a/ansible/playbooks/roles/opendistro_for_elasticsearch/templates/elasticsearch.yml.j2
+++ b/ansible/playbooks/roles/opensearch/templates/opensearch.yml.j2
@@ -1,16 +1,10 @@
#jinja2: lstrip_blocks: True
# {{ ansible_managed }}
-# ======================== Elasticsearch Configuration =========================
+# ======================== OpenSearch Configuration =========================
#
-# NOTE: Elasticsearch comes with reasonable defaults for most settings.
-# Before you set out to tweak and tune the configuration, make sure you
-# understand what are you trying to accomplish and the consequences.
-#
-# The primary way of configuring a node is via this file. This template lists
-# the most important settings you may want to configure for a production cluster.
-#
-# Please consult the documentation for further information on configuration options:
-# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
+# ------------------- Legacy Clients Compatibility Flag -----------------------
+# https://opensearch.org/docs/latest/clients/agents-and-ingestion-tools/index/
+compatibility.override_main_response_version: true
#
# ---------------------------------- Cluster -----------------------------------
#
@@ -32,15 +26,15 @@ node.name: {{ ansible_hostname }}
#
# Path to directory where to store the data (separate multiple locations by comma):
#
-path.data: {{ specification.paths.data }}
+path.data: {{ specification.paths.opensearch_data_dir }}
#
# Path to directory where the shared storage should be mounted:
#
-path.repo: {{ specification.paths.repo }}
+path.repo: {{ specification.paths.opensearch_snapshots_dir }}
#
# Path to log files:
#
-path.logs: {{ specification.paths.logs }}
+path.logs: {{ specification.paths.opensearch_log_dir }}
#
# ----------------------------------- Memory -----------------------------------
#
@@ -52,7 +46,7 @@ path.logs: {{ specification.paths.logs }}
# on the system and that the owner of the process is allowed to use this
# limit.
#
-# Elasticsearch performs poorly when the system is swapping the memory.
+# OpenSearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
@@ -76,9 +70,9 @@ transport.port: {{ transport_port }}
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
{% if groups[current_group_name] | length > 1 -%}
-discovery.seed_hosts: [{% for host in groups[current_group_name] %}"{{hostvars[host]['ansible_default_ipv4']['address']}}"{%- if not loop.last -%},{% endif %}{% endfor %}]
+discovery.seed_hosts: [{% for host in groups[current_group_name] %}"{{ hostvars[host]['ansible_hostname'] }}"{%- if not loop.last -%},{% endif %}{% endfor %}]
{% else %}
-discovery.seed_hosts: ["{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"]
+discovery.seed_hosts: ["{{ ansible_hostname }}"]
{% endif %}
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
@@ -87,7 +81,7 @@ discovery.seed_hosts: ["{{ ansible_default_ipv4.address | default(ansible_all_ip
cluster.initial_master_nodes: []
{% else %}
{% if groups[current_group_name] | length > 1 %}
-cluster.initial_master_nodes: [{% for host in groups[current_group_name] %}"{{hostvars[host]['ansible_hostname']}}"{%- if not loop.last -%},{% endif %}{% endfor %}]
+cluster.initial_master_nodes: [{% for host in groups[current_group_name] %}"{{ hostvars[host]['ansible_hostname'] }}"{%- if not loop.last -%},{% endif %}{% endfor %}]
{% else %}
cluster.initial_master_nodes: ["{{ ansible_hostname }}"]
{% endif %}
@@ -109,33 +103,33 @@ cluster.initial_master_nodes: ["{{ ansible_hostname }}"]
#
#action.destructive_requires_name: true
-######## Start OpenDistro for Elasticsearch Security Configuration ########
+######## OpenSearch Security Configuration ########
# WARNING: revise all the lines below before you go into production
-opendistro_security.ssl.transport.pemcert_filepath: {{ node_cert_filename.transport }}
-opendistro_security.ssl.transport.pemkey_filepath: {{ node_key_filename.transport }}
-opendistro_security.ssl.transport.pemtrustedcas_filepath: {{ root_ca_cert_filename.transport }}
-opendistro_security.ssl.transport.enforce_hostname_verification: {{ specification.opendistro_security.ssl.transport.enforce_hostname_verification | lower }}
-opendistro_security.ssl.http.enabled: true
-opendistro_security.ssl.http.pemcert_filepath: {{ node_cert_filename.http }}
-opendistro_security.ssl.http.pemkey_filepath: {{ node_key_filename.http }}
-opendistro_security.ssl.http.pemtrustedcas_filepath: {{ root_ca_cert_filename.http }}
-opendistro_security.allow_unsafe_democertificates: {{ opendistro_security_allow_unsafe_democertificates | lower }}
-opendistro_security.allow_default_init_securityindex: true
-opendistro_security.authcz.admin_dn:
+plugins.security.ssl.transport.pemcert_filepath: "{{ certificates.dirs.certs }}/{{ node_cert_filename.transport }}"
+plugins.security.ssl.transport.pemkey_filepath: "{{ certificates.dirs.certs }}/{{ node_key_filename.transport }}"
+plugins.security.ssl.transport.pemtrustedcas_filepath: "{{ certificates.dirs.certs }}/{{ root_ca_cert_filename.transport }}"
+plugins.security.ssl.transport.enforce_hostname_verification: {{ specification.opensearch_security.ssl.transport.enforce_hostname_verification | lower }}
+plugins.security.ssl.http.enabled: true
+plugins.security.ssl.http.pemcert_filepath: "{{ certificates.dirs.certs }}/{{ node_cert_filename.http }}"
+plugins.security.ssl.http.pemkey_filepath: "{{ certificates.dirs.certs }}/{{ node_key_filename.http }}"
+plugins.security.ssl.http.pemtrustedcas_filepath: "{{ certificates.dirs.certs }}/{{ root_ca_cert_filename.http }}"
+plugins.security.allow_unsafe_democertificates: {{ opensearch_security_allow_unsafe_democertificates | lower }}
+plugins.security.allow_default_init_securityindex: true
+plugins.security.authcz.admin_dn:
{% for dn in admin_dn %}
- '{{ dn }}'
{% endfor %}
{% if nodes_dn | count > 0 %}
-opendistro_security.nodes_dn:
+plugins.security.nodes_dn:
{% for dn in nodes_dn %}
- '{{ dn }}'
{% endfor %}
{% endif %}
-opendistro_security.audit.type: internal_elasticsearch
-opendistro_security.enable_snapshot_restore_privilege: true
-opendistro_security.check_snapshot_restore_write_privileges: true
-opendistro_security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]
+plugins.security.audit.type: internal_opensearch
+plugins.security.enable_snapshot_restore_privilege: true
+plugins.security.check_snapshot_restore_write_privileges: true
+plugins.security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]
cluster.routing.allocation.disk.threshold_enabled: false
node.max_local_storage_nodes: 3
-######## End OpenDistro for Elasticsearch Security Configuration ########
+######## End OpenSearch Security Configuration ########
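The security block renders the admin_dn and nodes_dn lists that are built elsewhere in the role. For orientation, with purely hypothetical subjects the generated fragment would look like:

    plugins.security.authcz.admin_dn:
    - 'CN=admin,OU=epiphany,O=epiphany'
    plugins.security.nodes_dn:
    - 'CN=node-1,OU=epiphany,O=epiphany'
    - 'CN=node-2,OU=epiphany,O=epiphany'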
diff --git a/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml b/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml
new file mode 100644
index 0000000000..cdda7d4123
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+file_name_version:
+ opensearch_dashboards:
+ x86_64: opensearch-dashboards-1.2.0-linux-x64.tar.gz
+ aarch64: opensearch-dashboards-1.2.0-linux-arm64.tar.gz
+opensearch_api_port: 9200
+java: "{{ es_java | default('java-1.8.0-openjdk.x86_64') }}"
diff --git a/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml b/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml
new file mode 100644
index 0000000000..ded1b9a7a3
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Restart dashboards
+ systemd:
+ name: opensearch-dashboards
+ state: restarted
+ enabled: true
diff --git a/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml b/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml
new file mode 100644
index 0000000000..d05f8c27e1
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/tasks/dashboards.yml
@@ -0,0 +1,51 @@
+---
+- name: Download OpenSearch dashboards
+ include_role:
+ name: download
+ tasks_from: download_file
+ vars:
+ file_name: "{{ file_name_version.opensearch_dashboards[ansible_architecture] }}"
+
+- name: Create OpenSearch Dashboards OS group
+ group:
+ name: "{{ specification.dashboards_os_group }}"
+ state: present
+
+- name: Create OpenSearch Dashboards OS user
+ user:
+ name: "{{ specification.dashboards_os_user }}"
+ state: present
+ shell: /bin/bash
+ group: "{{ specification.dashboards_os_group }}"
+ home: "{{ specification.paths.dashboards_home }}"
+
+- name: Extract OpenSearch Dashboards tar file
+ unarchive:
+ src: "{{ download_directory }}/{{ file_name_version.opensearch_dashboards[ansible_architecture] }}"
+ dest: "{{ specification.paths.dashboards_home }}"
+ owner: "{{ specification.dashboards_os_user }}"
+ remote_src: true
+ extra_opts:
+ - --strip-components=1
+
+- name: Set OpenSearch Dashboards hosts as fact
+ set_fact:
+ opensearch_nodes_dashboards: |-
+ {% for item in groups['opensearch_dashboards'] -%}
+ https://{{ item }}:{{ opensearch_api_port }}{% if not loop.last %}","{% endif %}
+ {%- endfor %}
+
+- name: Copy configuration file
+ template:
+ src: opensearch_dashboards.yml.j2
+ dest: "{{ specification.paths.dashboards_conf_dir }}/opensearch_dashboards.yml"
+ owner: "{{ specification.dashboards_os_user }}"
+ group: "{{ specification.dashboards_os_group }}"
+ mode: u=rw,go=r
+ backup: true
+
+- name: Create opensearch-dashboards.service unit file
+ template:
+ src: opensearch-dashboards.service.j2
+ dest: /etc/systemd/system/opensearch-dashboards.service
+ mode: u=rw,go=r
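The set_fact task above deliberately emits `","` between loop items so that the single Jinja expression inside `opensearch.hosts: ["{{ opensearch_nodes_dashboards }}"]` in the template expands to a quoted, comma-separated list. For two hypothetical hosts node-1 and node-2 the rendered line is:

    opensearch.hosts: ["https://node-1:9200","https://node-2:9200"]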
diff --git a/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml b/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml
new file mode 100644
index 0000000000..ed9fc2a3cb
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Include dashboards installation
+ include_tasks: dashboards.yml
+
+- name: Make sure OpenSearch Dashboards is started
+ service:
+ name: opensearch-dashboards
+ state: started
+ enabled: true
+
+- name: Get all the installed dashboards plugins
+ command: "{{ specification.paths.dashboards_plugin_bin_path }} list"
+ become: true
+ become_user: "{{ specification.dashboards_os_user }}"
+ register: list_plugins
+
+- name: Show all the installed dashboards plugins
+ debug:
+ msg: "{{ list_plugins.stdout }}"
diff --git a/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2 b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2
new file mode 100644
index 0000000000..ee4ec7dd67
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2
@@ -0,0 +1,48 @@
+[Unit]
+Description=OpenSearch Dashboards
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+RuntimeDirectory=opensearch-dashboards
+PrivateTmp=true
+
+WorkingDirectory={{ specification.paths.dashboards_home }}
+
+User={{ specification.dashboards_os_user }}
+Group={{ specification.dashboards_os_group }}
+
+ExecStart={{ specification.paths.dashboards_home }}/bin/opensearch-dashboards -q
+
+StandardOutput=journal
+StandardError=inherit
+
+# Specifies the maximum file descriptor number that can be opened by this process
+LimitNOFILE=65536
+
+# Specifies the maximum number of processes
+LimitNPROC=4096
+
+# Specifies the maximum size of virtual memory
+LimitAS=infinity
+
+# Specifies the maximum file size
+LimitFSIZE=infinity
+
+# Disable timeout logic and wait until process is stopped
+TimeoutStopSec=0
+
+# SIGTERM signal is used to stop the Java process
+KillSignal=SIGTERM
+
+# Send the signal only to the JVM rather than its control group
+KillMode=process
+
+# Java process is never killed
+SendSIGKILL=no
+
+# When a JVM receives a SIGTERM signal it exits with code 143
+SuccessExitStatus=143
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2 b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2
new file mode 100644
index 0000000000..49d0e5885c
--- /dev/null
+++ b/ansible/playbooks/roles/opensearch_dashboards/templates/opensearch_dashboards.yml.j2
@@ -0,0 +1,13 @@
+server.port: 5601
+server.host: "{{ inventory_hostname }}"
+opensearch.hosts: ["{{ opensearch_nodes_dashboards }}"]
+opensearch.ssl.verificationMode: none
+opensearch.username: "{{ specification.dashboards_user }}"
+opensearch.password: "{{ specification.dashboards_user_password }}"
+opensearch.requestHeadersWhitelist: [ authorization,securitytenant ]
+
+opensearch_security.multitenancy.enabled: true
+opensearch_security.multitenancy.tenants.preferred: ["Private", "Global"]
+opensearch_security.readonly_mode.roles: ["kibana_read_only"]
+# Use this setting if you are running dashboards without https
+opensearch_security.cookie.secure: false
diff --git a/ansible/playbooks/roles/preflight/defaults/main.yml b/ansible/playbooks/roles/preflight/defaults/main.yml
index 32591a7e38..51aca46714 100644
--- a/ansible/playbooks/roles/preflight/defaults/main.yml
+++ b/ansible/playbooks/roles/preflight/defaults/main.yml
@@ -37,10 +37,9 @@ unsupported_roles:
- zookeeper
- haproxy
- logging
+ - opensearch
+ - opensearch_dashboards
- elasticsearch_curator
- - opendistro_for_elasticsearch
- - elasticsearch
- - kibana
- filebeat
- prometheus
- grafana
@@ -74,10 +73,9 @@ unsupported_roles:
- zookeeper
- haproxy
- logging
+ - opensearch
+ - opensearch_dashboards
- elasticsearch_curator
- - opendistro_for_elasticsearch
- - elasticsearch
- - kibana
- filebeat
- prometheus
- grafana
diff --git a/ansible/playbooks/roles/recovery/defaults/main.yml b/ansible/playbooks/roles/recovery/defaults/main.yml
index 88be45c8a6..e105375aa7 100644
--- a/ansible/playbooks/roles/recovery/defaults/main.yml
+++ b/ansible/playbooks/roles/recovery/defaults/main.yml
@@ -2,5 +2,5 @@
recovery_dir: /epibackup
recovery_source_dir: "{{ recovery_dir }}/mounted"
recovery_source_host: "{{ groups.repository[0] if (custom_repository_url | default(false)) else (resolved_repository_hostname | default(groups.repository[0])) }}"
-elasticsearch_snapshot_repository_name: epiphany
-elasticsearch_snapshot_repository_location: /var/lib/elasticsearch-snapshots
+opensearch_snapshot_repository_name: epiphany
+opensearch_snapshot_repository_location: /var/lib/opensearch-snapshots
diff --git a/ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml
similarity index 62%
rename from ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml
rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml
index 3792303795..3b50d75ca1 100644
--- a/ansible/playbooks/roles/recovery/tasks/logging_kibana_etc.yml
+++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_conf.yml
@@ -1,8 +1,13 @@
---
+- name: Include vars from opensearch role
+ include_vars:
+ file: roles/opensearch/vars/main.yml
+ name: opensearch_vars
+
- name: Find snapshot archive
import_tasks: common/find_snapshot_archive.yml
vars:
- snapshot_prefix: "kibana_etc"
+ snapshot_prefix: "opensearch_conf"
snapshot_name: "{{ specification.components.logging.snapshot_name }}"
- name: Transfer the archive via rsync
@@ -15,24 +20,24 @@
- name: Verify snapshot checksum
import_tasks: common/verify_snapshot_checksum.yml
-- name: Stop kibana service
+- name: Stop OpenSearch service
systemd:
- name: kibana
+ name: opensearch
state: stopped
- name: Clear directories
import_tasks: common/clear_directories.yml
vars:
dirs_to_clear:
- - /etc/kibana/
+ - "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}"
- name: Extract the archive
unarchive:
- dest: /etc/kibana/
+ dest: "{{ opensearch_vars.specification.paths.opensearch_conf_dir }}"
src: "{{ recovery_dir }}/{{ snapshot_path | basename }}"
remote_src: true
-- name: Start kibana service
+- name: Start OpenSearch service
systemd:
- name: kibana
+ name: opensearch
state: started
diff --git a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml
similarity index 59%
rename from ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml
rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml
index 7c81954bf5..fcbfcd0f2e 100644
--- a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_etc.yml
+++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_dashboards_conf.yml
@@ -1,8 +1,13 @@
---
+- name: Include vars from opensearch_dashboards role
+ include_vars:
+ file: roles/opensearch_dashboards/vars/main.yml
+ name: opensearch_dashboards_vars
+
- name: Find snapshot archive
import_tasks: common/find_snapshot_archive.yml
vars:
- snapshot_prefix: "elasticsearch_etc"
+ snapshot_prefix: "opsd_conf_dir"
snapshot_name: "{{ specification.components.logging.snapshot_name }}"
- name: Transfer the archive via rsync
@@ -15,24 +20,24 @@
- name: Verify snapshot checksum
import_tasks: common/verify_snapshot_checksum.yml
-- name: Stop elasticsearch service
+- name: Stop opensearch-dashboards service
systemd:
- name: elasticsearch
+ name: opensearch-dashboards
state: stopped
- name: Clear directories
import_tasks: common/clear_directories.yml
vars:
dirs_to_clear:
- - /etc/elasticsearch/
+ - "{{ opensearch_dashboards_vars.specification.paths.opsd_conf_dir }}"
- name: Extract the archive
unarchive:
- dest: /etc/elasticsearch/
+ dest: "{{ opensearch_dashboards_vars.specification.paths.opsd_conf_dir }}"
src: "{{ recovery_dir }}/{{ snapshot_path | basename }}"
remote_src: true
-- name: Start elasticsearch service
+- name: Start opensearch-dashboards service
systemd:
- name: elasticsearch
+ name: opensearch-dashboards
state: started
diff --git a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml
similarity index 66%
rename from ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml
rename to ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml
index f1fa9bf15f..19ca6645c6 100644
--- a/ansible/playbooks/roles/recovery/tasks/logging_elasticsearch_snapshot.yml
+++ b/ansible/playbooks/roles/recovery/tasks/logging_opensearch_snapshot.yml
@@ -1,12 +1,12 @@
---
-- name: Include default vars from opendistro_for_elasticsearch role
+- name: Include default vars from opensearch role
include_vars:
- file: roles/opendistro_for_elasticsearch/defaults/main.yml
+ file: roles/opensearch/defaults/main.yml
name: odfe
- name: Set helper facts
set_fact:
- elasticsearch_endpoint: >-
+ opensearch_endpoint: >-
https://{{ ansible_default_ipv4.address }}:9200
vars:
uri_template: &uri
@@ -18,7 +18,7 @@
- name: Check cluster health
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_cluster/health"
+ url: "{{ opensearch_endpoint }}/_cluster/health"
method: GET
register: uri_response
until: uri_response is success
@@ -28,7 +28,7 @@
- name: Find snapshot archive
import_tasks: common/find_snapshot_archive.yml
vars:
- snapshot_prefix: "elasticsearch_snapshot"
+ snapshot_prefix: "opensearch_snapshot"
snapshot_name: "{{ specification.components.logging.snapshot_name }}"
- name: Transfer the archive via rsync
@@ -45,38 +45,38 @@
import_tasks: common/clear_directories.yml
vars:
dirs_to_clear:
- - "{{ elasticsearch_snapshot_repository_location }}/"
+ - "{{ opensearch_snapshot_repository_location }}/"
- name: Extract the archive
unarchive:
- dest: "{{ elasticsearch_snapshot_repository_location }}/"
+ dest: "{{ opensearch_snapshot_repository_location }}/"
src: "{{ recovery_dir }}/{{ snapshot_path | basename }}"
remote_src: true
- name: Change snapshot directory permissions
file:
- path: "{{ elasticsearch_snapshot_repository_location }}/"
- owner: elasticsearch
- group: elasticsearch
+ path: "{{ opensearch_snapshot_repository_location }}/"
+ owner: opensearch
+ group: opensearch
recurse: true
- name: Reconstruct the snapshot_name
set_fact:
snapshot_name: >-
- {{ snapshot_path | basename | regex_replace('^elasticsearch_snapshot_(.*).tar.gz$', '\1') }}
+ {{ snapshot_path | basename | regex_replace('^opensearch_snapshot_(.*).tar.gz$', '\1') }}
-- debug: var=snapshot_name
-
-- name: Ensure all kibana and filebeat instances are stopped, then restore the snapshot
+- name: Display snapshot name
+ debug: var=snapshot_name
+- name: Ensure all OpenSearch Dashboards and filebeat instances are stopped, then restore the snapshot
block:
- - name: Stop all kibana instances
+ - name: Stop all OpenSearch Dashboards instances
delegate_to: "{{ item }}"
systemd:
- name: kibana
+ name: opensearch-dashboards
state: stopped
enabled: false
- loop: "{{ groups.kibana | default([]) }}"
+ loop: "{{ groups.opensearch_dashboards | default([]) }}"
- name: Stop all filebeat instances
delegate_to: "{{ item }}"
@@ -89,29 +89,29 @@
- name: Close all indices
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_all/_close"
+ url: "{{ opensearch_endpoint }}/_all/_close"
method: POST
- name: Delete all indices
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_all"
+ url: "{{ opensearch_endpoint }}/_all"
method: DELETE
- name: Restore the snapshot
uri:
<<: *uri
- url: "{{ elasticsearch_endpoint }}/_snapshot/{{ elasticsearch_snapshot_repository_name }}/{{ snapshot_name }}/_restore"
+ url: "{{ opensearch_endpoint }}/_snapshot/{{ opensearch_snapshot_repository_name }}/{{ snapshot_name }}/_restore"
method: POST
always:
- - name: Start all kibana instances
+ - name: Start all OpenSearch Dashboards instances
delegate_to: "{{ item }}"
systemd:
- name: kibana
+ name: opensearch-dashboards
state: started
enabled: true
- loop: "{{ groups.kibana | default([]) }}"
+ loop: "{{ groups.opensearch_dashboards | default([]) }}"
- name: Start all filebeat instances
delegate_to: "{{ item }}"
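The `<<: *uri` entries in the tasks above merge the uri_template anchor defined in the "Set helper facts" task, so the shared request options are declared once per file. A self-contained sketch of the same YAML anchor/merge pattern, with hypothetical values:

    defaults: &uri
      client_cert: /etc/ssl/epiphany-admin.pem
      client_key: /etc/ssl/epiphany-admin-key.pem
      validate_certs: false
    cluster_health_request:
      <<: *uri
      url: https://localhost:9200/_cluster/health
      method: GET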
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/debian/debian.yml b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/debian/debian.yml
index aaa42da37d..447d31536d 100644
--- a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/debian/debian.yml
+++ b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/debian/debian.yml
@@ -24,10 +24,6 @@ repositories:
content: 'deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main'
key: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch'
- opendistroforelasticsearch:
- content: 'deb https://d3g5vo6xdbdb9a.cloudfront.net/apt stable main'
- key: 'https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch'
-
# postgresql
pgdg:
content: 'deb http://apt.postgresql.org/pub/repos/apt focal-pgdg main'
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat/redhat.yml b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat/redhat.yml
index d040640e1b..2d952bc43a 100644
--- a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat/redhat.yml
+++ b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat/redhat.yml
@@ -46,19 +46,6 @@ repositories:
- https://packages.cloud.google.com/yum/doc/yum-key.gpg
- https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- opendistroforelasticsearch:
- id: opendistroforelasticsearch-artifacts-repo
- data: |
- name=Release RPM artifacts of OpenDistroForElasticsearch
- baseurl=https://d3g5vo6xdbdb9a.cloudfront.net/yum/noarch/
- enabled=1
- gpgcheck=1
- repo_gpgcheck=1
- autorefresh=1
- type=rpm-md
- gpg_keys:
- - https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch
-
postgresql-13:
id: pgdg13
data: |
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/debian/ubuntu-20.04/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/debian/ubuntu-20.04/packages.yml
index b392062e2e..46cab38ff6 100644
--- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/debian/ubuntu-20.04/packages.yml
+++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/debian/ubuntu-20.04/packages.yml
@@ -14,7 +14,8 @@ packages:
- 'docker-ce-cli=5:20.10.8*'
- 'docker-ce-rootless-extras=5:20.10.8*'
- 'ebtables'
- # for opendistroforelasticsearch & logging roles
+
+ # for opensearch & logging roles
- 'elasticsearch-oss=7.10.2*'
# Erlang packages must be compatible with RabbitMQ version.
@@ -57,13 +58,6 @@ packages:
# for nfs-common
- 'libtirpc3'
- - 'opendistro-alerting=1.13.1*'
- - 'opendistro-index-management=1.13.1*'
- - 'opendistro-job-scheduler=1.13.0*'
- - 'opendistro-performance-analyzer=1.13.0*'
- - 'opendistro-security=1.13.1*'
- - 'opendistro-sql=1.13.0*'
- - 'opendistroforelasticsearch-kibana=1.13.1*'
- 'openjdk-8-jre-headless'
- 'openssl'
- 'postgresql-13'
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml
index ec2f266d7a..e582b3ef9d 100644
--- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml
+++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml
@@ -44,3 +44,13 @@ files:
'https://charts.rook.io/release/rook-ceph-cluster-v1.8.8.tgz':
sha256: df4e1f2125af41fb84c72e4d12aa0cb859dddd4f37b3d5979981bd092040bd16
+
+ # --- OpenSearch Bundle ---
+ 'https://artifacts.opensearch.org/releases/bundle/opensearch/1.2.4/opensearch-1.2.4-linux-x64.tar.gz':
+ sha256: d40f2696623b6766aa235997e2847a6c661a226815d4ba173292a219754bd8a8
+
+ 'https://artifacts.opensearch.org/releases/bundle/opensearch-dashboards/1.2.0/opensearch-dashboards-1.2.0-linux-x64.tar.gz':
+ sha256: 14623798e61be6913e2a218d6ba3e308e5036359d7bda58482ad2f1340aa3c85
+
+ 'https://github.com/opensearch-project/perftop/releases/download/1.2.0.0/opensearch-perf-top-1.2.0.0-linux-x64.zip':
+ sha256: e8f9683976001a8cf59a9f86da5caafa10b88643315f0af2baa93a9354d41e2b
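These digests let the download tooling verify the fetched artifacts. Outside of that tooling, the same check can be expressed directly with get_url, reusing a URL/sha256 pair from the list above (a sketch only):

    - name: Download the OpenSearch bundle and verify its digest
      get_url:
        url: https://artifacts.opensearch.org/releases/bundle/opensearch/1.2.4/opensearch-1.2.4-linux-x64.tar.gz
        dest: /tmp/opensearch-1.2.4-linux-x64.tar.gz
        checksum: sha256:d40f2696623b6766aa235997e2847a6c661a226815d4ba173292a219754bd8a8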
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml
index 82195f01cb..87aa2538bb 100644
--- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml
+++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml
@@ -36,7 +36,7 @@ packages:
- 'docker-ce-cli-20.10.8'
- 'docker-ce-rootless-extras-20.10.8'
- 'elasticsearch-curator-5.8.3'
- - 'elasticsearch-oss-7.10.2' # for opendistroforelasticsearch & logging roles
+ - 'elasticsearch-oss-7.10.2' # for opensearch & logging roles
- 'ethtool'
- 'filebeat-7.12.1'
- 'firewalld'
@@ -68,15 +68,7 @@ packages:
- 'nmap-ncat'
- 'nss' # for java-1.8.0-openjdk-headless
- 'nss-softokn' # for nss
- # Open Distro for Elasticsearch plugins are installed individually to not download them twice in different versions (as dependencies of opendistroforelasticsearch package)
- 'ntsysv' # for python36
- - 'opendistro-alerting-1.13.1.*'
- - 'opendistro-index-management-1.13.1.*'
- - 'opendistro-job-scheduler-1.13.0.*'
- - 'opendistro-performance-analyzer-1.13.0.*'
- - 'opendistro-security-1.13.1.*'
- - 'opendistro-sql-1.13.0.*'
- - 'opendistroforelasticsearch-kibana-1.13.1' # kibana has shorter version
- 'openssl'
- 'perl' # for vim
- 'perl-Getopt-Long' # for vim
diff --git a/ansible/playbooks/roles/upgrade/defaults/main.yml b/ansible/playbooks/roles/upgrade/defaults/main.yml
index e7e0a5f77a..1695625cdb 100644
--- a/ansible/playbooks/roles/upgrade/defaults/main.yml
+++ b/ansible/playbooks/roles/upgrade/defaults/main.yml
@@ -1,24 +1,10 @@
---
-logging:
+opensearch:
upgrade_config:
custom_admin_certificate:
- cert_path: /etc/elasticsearch/custom-admin.pem
- key_path: /etc/elasticsearch/custom-admin-key.pem
-
-opendistro_for_elasticsearch:
- upgrade_config:
- custom_admin_certificate:
- cert_path: /etc/elasticsearch/custom-admin.pem
- key_path: /etc/elasticsearch/custom-admin-key.pem
-
- certs_migration:
- demo_DNs:
- admin: CN=kirk,OU=client,O=client,L=test,C=de
- node: CN=node-0.example.com,OU=node,O=node,L=test,DC=de
- dual_root_ca:
- filename: demo2epiphany-certs-migration-root-CAs.pem
-
- upgrade_state_file_path: /etc/elasticsearch/epicli-upgrade-started.state
+ cert_path: /etc/elasticsearch/epiphany-admin.pem
+ key_path: /etc/elasticsearch/epiphany-admin-key.pem
+ upgrade_state_file_path: /var/lib/epiphany/upgrade/state/opensearch-upgrade.uncompleted
kubernetes:
upgrade_state_file_path: /var/lib/epiphany/upgrade/state/kubernetes-{{ ver }}.uncompleted
diff --git a/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml b/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml
index f7731c3218..81af709f8f 100644
--- a/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/elasticsearch-curator.yml
@@ -24,6 +24,6 @@
- name: Update elasticsearch-curator package
include_role:
name: elasticsearch_curator
- tasks_from: install-es-curator-{{ ansible_os_family }} # update only package and do not change configured cron jobs
+ tasks_from: install-ops-curator-{{ ansible_os_family }} # update only package and do not change configured cron jobs
when:
- curator_defaults.curator_version is version(ansible_facts.packages['elasticsearch-curator'][0].version, '>')
diff --git a/ansible/playbooks/roles/upgrade/tasks/kibana.yml b/ansible/playbooks/roles/upgrade/tasks/kibana.yml
deleted file mode 100644
index c8e3baab72..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/kibana.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Kibana | Get information about installed packages as facts
- package_facts:
- manager: auto
- when: ansible_facts.packages is undefined
-
-# Kibana is upgraded only when there is no 'kibana-oss' package (replaced by 'opendistroforelasticsearch-kibana' since v0.5).
-# This condition has been added to not fail when 'epicli upgrade' is run for Epiphany v0.4 cluster.
-# We cannot upgrade Kibana to v7 having Elasticsearch v6.
-- name: Upgrade Kibana
- when: ansible_facts.packages['kibana-oss'] is undefined
- block:
- - name: Kibana | Assert that opendistroforelasticsearch-kibana package is installed
- assert:
- that: ansible_facts.packages['opendistroforelasticsearch-kibana'] is defined
- fail_msg: opendistroforelasticsearch-kibana package not found, nothing to upgrade
- quiet: true
-
- - name: Kibana | Load defaults from kibana role
- include_vars:
- file: roles/kibana/defaults/main.yml
- name: kibana_defaults
-
- - name: Kibana | Print versions
- debug:
- msg:
- - "Installed version: {{ ansible_facts.packages['opendistroforelasticsearch-kibana'][0].version }}"
- - "Target version: {{ kibana_defaults.kibana_version[ansible_os_family] }}"
-
- - name: Upgrade Kibana
- when:
- - kibana_defaults.kibana_version[ansible_os_family]
- is version(ansible_facts.packages['opendistroforelasticsearch-kibana'][0].version, '>=')
- block:
- - name: Kibana | Slurp /etc/kibana/kibana.yml
- slurp:
- src: /etc/kibana/kibana.yml
- register: _kibana_config_yml
- no_log: true
-
- - name: Kibana | Upgrade
- import_role:
- name: kibana
- vars:
- context: upgrade
- existing_es_password: >-
- {{ (_kibana_config_yml.content | b64decode | from_yaml)['elasticsearch.password'] }}
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml
deleted file mode 100644
index b3f14e4137..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-01.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: ODFE | Get information about installed packages as facts
- package_facts:
- manager: auto
- when: ansible_facts.packages is undefined
-
-- name: ODFE | Assert that elasticsearch-oss package is installed
- assert:
- that: ansible_facts.packages['elasticsearch-oss'] is defined
- fail_msg: elasticsearch-oss package not found, nothing to upgrade
- quiet: true
-
-- name: ODFE | Include defaults from opendistro_for_elasticsearch role
- include_vars:
- file: roles/opendistro_for_elasticsearch/defaults/main.yml
- name: odfe_defaults
-
-- name: ODFE | Patch log4j
- include_role:
- name: opendistro_for_elasticsearch
- tasks_from: patch-log4j
- when: odfe_defaults.log4j_file_name is defined
-
-- name: Restart elasticsearch service
- systemd:
- name: elasticsearch
- state: restarted
- register: restart_elasticsearch
- when: odfe_defaults.log4j_file_name is defined and log4j_patch.changed
-
-- name: ODFE | Print elasticsearch-oss versions
- debug:
- msg:
- - "Installed version: {{ ansible_facts.packages['elasticsearch-oss'][0].version }}"
- - "Target version: {{ odfe_defaults.versions[ansible_os_family].elasticsearch_oss }}"
-
-# If state file exists it means the previous run failed
-- name: ODFE | Check if upgrade state file exists
- stat:
- path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
- get_attributes: false
- get_checksum: false
- get_mime: false
- register: stat_upgrade_state_file
-
-- name: ODFE | Upgrade Elasticsearch and ODFE plugins (part 1/2)
- include_tasks: opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml
- when: _target_version is version(ansible_facts.packages['elasticsearch-oss'][0].version, '>')
- or (_target_version is version(ansible_facts.packages['elasticsearch-oss'][0].version, '==')
- and stat_upgrade_state_file.stat.exists)
- vars:
- _target_version: "{{ odfe_defaults.versions[ansible_os_family].elasticsearch_oss }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml
deleted file mode 100644
index 2b3f304465..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch-02.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# If state file exists, it means upgrade has been started by the previous play and should be continued
-- name: ODFE | Check if upgrade state file exists
- stat:
- path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
- get_attributes: false
- get_checksum: false
- get_mime: false
- register: stat_upgrade_state_file
-
-- name: ODFE | Upgrade Elasticsearch and ODFE plugins (part 2/2)
- include_tasks: opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml
- when: stat_upgrade_state_file.stat.exists
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml
deleted file mode 100644
index 806c09a3d0..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-01.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-# =================================================================================================
-# Migration from demo certs to generated by Epiphany
-# -------------------------------------------------------------------------------------------------
-# A) Parallel part (all nodes at the same time) - THIS FILE
-# 1. Assert API access using demo cert (done in pre-migration part)
-# 2. Generate Epiphany certs (done in pre-migration part)
-# 3. Save cluster status to file (done in pre-migration part)
-# 4. Create dual root CA file for the migration (demo + Epiphany root CAs concatenated), needed temporarily
-# 5. Patch the following properties in existing elasticsearch.yml:
-# a) opendistro_security.authcz.admin_dn - add Epiphany admin cert
-# b) opendistro_security.nodes_dn - by default not present, add all Epiphany node certs
-# c) opendistro_security.ssl.http.pemtrustedcas_filepath - replace demo root CA with the dual root CA file
-# d) opendistro_security.ssl.transport.pemtrustedcas_filepath - replace demo root CA with the dual root CA file
-# B) Serial part (node by node) - tasks from migrate-from-demo-certs-02.yml
-
-# Create dual root CA transitional file
-- include_tasks: utils/create-dual-cert-file.yml
- vars:
- certs_to_concatenate:
- - "{{ (certificates.dirs.certs, certificates.files.demo.root_ca.cert) | path_join }}"
- - "{{ (certificates.dirs.certs, certificates.files.root_ca.cert.filename) | path_join }}"
- target_path: "{{ (certificates.dirs.certs, opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename) | path_join }}"
-
-- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml
- slurp:
- src: /etc/elasticsearch/elasticsearch.yml
- register: _elasticsearch_yml
-
-- name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to dual root CA)
- copy:
- dest: /etc/elasticsearch/elasticsearch.yml
- content: "{{ _patched_content | to_nice_yaml }}"
- mode: u=rw,g=rw,o=
- owner: root
- group: elasticsearch
- backup: true
- vars:
- _epiphany_subjects:
- admin: "{{ certificates.files.admin.cert.subject }}"
- node: "{{ certificates.files.node.cert.subject }}"
- _epiphany_dn_attributes:
- admin: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.admin.keys()) }}"
- node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}"
- _epiphany_DNs:
- admin: >-
- {{ _epiphany_dn_attributes.admin | zip(_epiphany_dn_attributes.admin | map('extract', _epiphany_subjects.admin))
- | map('join','=') | join(',') }}
- node: >-
- {{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node))
- | map('join','=') | join(',') }}
- _epiphany_nodes_dn: >-
- {%- for node in ansible_play_hosts_all -%}
- {%- if loop.first -%}[{%- endif -%}
- '{{ _epiphany_DNs.node.split(',') | map('regex_replace', '^CN=.+$', 'CN=' + hostvars[node].ansible_nodename) | join(',') }}'
- {%- if not loop.last -%},{%- else -%}]{%- endif -%}
- {%- endfor -%}
- _old_content: >-
- {{ _elasticsearch_yml.content | b64decode | from_yaml }}
- _updated_settings:
- opendistro_security.authcz.admin_dn: >-
- {{ _old_content['opendistro_security.authcz.admin_dn'] | default([]) | map('replace', ', ', ',')
- | union([opendistro_for_elasticsearch.certs_migration.demo_DNs.admin] + [_epiphany_DNs.admin]) }}
- opendistro_security.nodes_dn: >-
- {{ _old_content['opendistro_security.nodes_dn'] | default([])
- | union([opendistro_for_elasticsearch.certs_migration.demo_DNs.node] + _epiphany_nodes_dn) }}
-
- opendistro_security.ssl.http.pemtrustedcas_filepath: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}"
- opendistro_security.ssl.transport.pemtrustedcas_filepath: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}"
- _patched_content: >-
- {{ _old_content | combine(_updated_settings) }}
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml
deleted file mode 100644
index 223f6968df..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-# =================================================================================================
-# Migration from demo certs to generated by Epiphany
-# -------------------------------------------------------------------------------------------------
-# A) Parallel part (all nodes at the same time) - tasks from migrate-from-demo-certs-01.yml
-# B) Serial part (node by node) - THIS FILE
-# 1. Prepare cluster for a node restart (disable shard allocation)
-# 2. Restart all nodes one by one waiting for yellow cluster status after each restart
-# 3. Patch elasticsearch.yml to use Epiphany node cert instead of demo (all nodes)
-# 4. Restart all nodes one by one waiting for yellow cluster status after each restart
-# 5. Re-enable shard allocation
-# 6. Wait for green/yellow cluster status
-# 7. Test API access using Epiphany admin cert (all nodes)
-# 8. Update API related facts to use Epiphany admin cert instead of demo
-# 9. Reload config file
-
-- when: inventory_hostname == ansible_play_hosts_all[0] # run once
- block:
- # Prepare cluster for a node restart
- - include_tasks: utils/prepare-cluster-for-node-restart.yml
-
- # Restart all nodes (special flow: run once but in loop for each host)
- - include_tasks:
- file: utils/restart-node.yml
- apply:
- delegate_to: "{{ target_inventory_hostname }}"
- delegate_facts: true
- loop: "{{ ansible_play_hosts_all }}"
- loop_control:
- loop_var: target_inventory_hostname
-
- # Patch elasticsearch.yml to use Epiphany node cert (all hosts)
-
- - name: ODFE | Load /etc/elasticsearch/elasticsearch.yml
- slurp:
- src: /etc/elasticsearch/elasticsearch.yml
- register: _elasticsearch_yml
- delegate_to: "{{ target_inventory_hostname }}"
- loop: "{{ ansible_play_hosts_all }}"
- loop_control:
- loop_var: target_inventory_hostname
-
- - name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to Epiphany node certificates)
- copy:
- dest: /etc/elasticsearch/elasticsearch.yml
- content: "{{ _patched_content | to_nice_yaml }}"
- mode: u=rw,g=rw,o=
- owner: root
- group: elasticsearch
- backup: true
- delegate_to: "{{ target_inventory_hostname }}"
- delegate_facts: true
- loop: "{{ ansible_play_hosts_all }}"
- loop_control:
- index_var: loop_index0
- loop_var: target_inventory_hostname
- vars:
- _node_hostname: "{{ hostvars[target_inventory_hostname].ansible_nodename }}"
- _epiphany_node_cert:
- cert_filename: "{{ certificates.files.node.cert.filename | replace(ansible_nodename, _node_hostname) }}"
- key_filename: "{{ certificates.files.node.key.filename | replace(ansible_nodename, _node_hostname) }}"
- _old_content: >-
- {{ _elasticsearch_yml.results[loop_index0].content | b64decode | from_yaml }}
- _updated_settings:
- opendistro_security.ssl.http.pemcert_filepath: "{{ _epiphany_node_cert.cert_filename }}"
- opendistro_security.ssl.http.pemkey_filepath: "{{ _epiphany_node_cert.key_filename }}"
- opendistro_security.ssl.transport.pemcert_filepath: "{{ _epiphany_node_cert.cert_filename }}"
- opendistro_security.ssl.transport.pemkey_filepath: "{{ _epiphany_node_cert.key_filename }}"
- _patched_content: >-
- {{ _old_content | combine(_updated_settings) }}
-
- # Restart all nodes (special flow: run once but in loop for each host)
- - include_tasks:
- file: utils/restart-node.yml
- apply:
- delegate_to: "{{ target_inventory_hostname }}"
- delegate_facts: true
- loop: "{{ ansible_play_hosts_all }}"
- loop_control:
- loop_var: target_inventory_hostname
-
- # Re-enable shard allocation
- - include_tasks: utils/enable-shard-allocation.yml
-
- # Wait for shard allocation (for 'green' status at least 2 nodes must be already upgraded)
- - include_tasks: utils/wait-for-shard-allocation.yml
-
- # Test API access using Epiphany admin cert (all nodes)
- - include_tasks:
- file: utils/assert-api-access.yml
- apply:
- delegate_to: "{{ target_inventory_hostname }}"
- delegate_facts: true
- loop: "{{ ansible_play_hosts_all }}"
- loop_control:
- loop_var: target_inventory_hostname
- vars:
- es_api:
- cert_type: Epiphany
- cert_path: &epi_cert_path "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}"
- key_path: &epi_key_path "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}"
- url: "{{ hostvars[target_inventory_hostname].es_api.url }}"
- fail_msg: API access test failed.
-
-- name: Update API related facts to use Epiphany admin certificate instead of demo
- set_fact:
- es_api: "{{ es_api | combine(_es_api) }}"
- vars:
- _es_api:
- cert_type: Epiphany
- cert_path: *epi_cert_path
- key_path: *epi_key_path
-
-# Reload config file to preserve patched settings (sets 'existing_config' fact)
-- include_tasks: utils/get-config-from-files.yml
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml
deleted file mode 100644
index addd327aa3..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/migrate-from-demo-certs-non-clustered.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml
- slurp:
- src: /etc/elasticsearch/elasticsearch.yml
- register: _elasticsearch_yml
-
-- name: OFDE | Patch /etc/elasticsearch/elasticsearch.yml (switch to generated certificates)
- copy:
- dest: /etc/elasticsearch/elasticsearch.yml
- content: "{{ _patched_content | to_nice_yaml }}"
- mode: u=rw,g=rw,o=
- owner: root
- group: elasticsearch
- backup: true
- vars:
- _epiphany_subjects:
- admin: "{{ certificates.files.admin.cert.subject }}"
- node: "{{ certificates.files.node.cert.subject }}"
- _epiphany_dn_attributes:
- admin: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.admin.keys()) }}"
- node: "{{ certificates.dn_attributes_order | intersect(_epiphany_subjects.node.keys()) }}"
- _epiphany_DNs:
- admin: >-
- {{ _epiphany_dn_attributes.admin | zip(_epiphany_dn_attributes.admin | map('extract', _epiphany_subjects.admin))
- | map('join','=') | join(',') }}
- node: >-
- {{ _epiphany_dn_attributes.node | zip(_epiphany_dn_attributes.node | map('extract', _epiphany_subjects.node))
- | map('join','=') | join(',') }}
- _old_content: >-
- {{ _elasticsearch_yml.content | b64decode | from_yaml }}
- _updated_settings:
- opendistro_security.authcz.admin_dn: >-
- {{ _old_content['opendistro_security.authcz.admin_dn'] | default([]) | map('replace', ', ', ',')
- | union([_epiphany_DNs.admin]) }}
- opendistro_security.nodes_dn: >-
- {{ _old_content['opendistro_security.nodes_dn'] | default([])
- | union([_epiphany_DNs.node]) }}
-
- opendistro_security.ssl.http.pemcert_filepath: "{{ certificates.files.node.cert.filename }}"
- opendistro_security.ssl.http.pemkey_filepath: "{{ certificates.files.node.key.filename }}"
- opendistro_security.ssl.transport.pemcert_filepath: "{{ certificates.files.node.cert.filename }}"
- opendistro_security.ssl.transport.pemkey_filepath: "{{ certificates.files.node.key.filename }}"
-
- opendistro_security.ssl.http.pemtrustedcas_filepath: "{{ certificates.files.root_ca.cert.filename }}"
- opendistro_security.ssl.transport.pemtrustedcas_filepath: "{{ certificates.files.root_ca.cert.filename }}"
-
- _patched_content: >-
- {{ _old_content | combine(_updated_settings) }}
-
-- include_tasks:
- file: utils/restart-node.yml
- vars:
- target_inventory_hostname: "{{ inventory_hostname }}"
- skip_waiting_for_node: true # because after restart demo certificate stops working
-
-# Test API access using Epiphany admin cert
-- include_tasks:
- file: utils/assert-api-access.yml
- vars:
- es_api:
- cert_type: Epiphany
- cert_path: &epi_cert_path "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}"
- key_path: &epi_key_path "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}"
- url: "{{ hostvars[inventory_hostname].es_api.url }}"
- fail_msg: API access test failed.
-
-- name: Update API related facts to use Epiphany admin certificate instead of demo
- set_fact:
- es_api: "{{ es_api | combine(_es_api) }}"
- vars:
- _es_api:
- cert_type: Epiphany
- cert_path: *epi_cert_path
- key_path: *epi_key_path
-
-# Reload config file to preserve patched settings (sets 'existing_config' fact)
-- include_tasks: utils/get-config-from-files.yml
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml
deleted file mode 100644
index e709502eda..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-01.yml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-# This file contains only pre-upgrade tasks that can be run in parallel on all hosts
-
-- name: ODFE | Create upgrade state file
- become: true
- file:
- path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
- state: touch
- mode: u=rw,g=r,o=
-
-- name: ODFE | Ensure elasticsearch service is running
- systemd:
- name: elasticsearch
- enabled: yes
- state: started
- register: elasticsearch_state
-
-# Sets 'existing_config' fact
-- include_tasks: utils/get-config-from-files.yml
-
-- name: ODFE | Set common facts
- set_fact:
- certificates: "{{ odfe_defaults.certificates }}"
- es_host: "{{ existing_config.main['network.host'] | default('_local_') }}"
- es_http_port: "{{ existing_config.main['http.port'] | default(odfe_defaults.ports.http) }}"
- es_transport_port: "{{ existing_config.main['transport.port'] | default(odfe_defaults.ports.transport) }}"
- es_clustered: "{{ (existing_config.main['discovery.seed_hosts'] | length > 1) | ternary(True, False) }}"
- es_node_name: "{{ existing_config.main['node.name'] }}"
-
-- name: ODFE | Wait for elasticsearch service to start up
- wait_for:
- port: "{{ es_transport_port }}"
- host: "{{ es_host if (es_host is not regex('^_.+_$')) else '0.0.0.0' }}" # 0.0.0.0 means any IP
- when: elasticsearch_state.changed
-
-# This block requires elasticsearch service to be running
-- name: Get host address when special value is used # e.g. '_site_'
- when: es_host is regex('^_.+_$')
- block:
- - name: Gather facts on listening ports
- community.general.listen_ports_facts:
-
- - name: Get host address based on transport port
- set_fact:
- es_host: "{{ ansible_facts.tcp_listen | selectattr('port', '==', es_transport_port|int)
- | map(attribute='address') | reject('match', '::') | first }}"
-
-# NOTE: We need admin certificate for passwordless administrative access to REST API (since we don't know admin's password)
-
-- include_role:
- name: certificate
- tasks_from: install-packages # requirements for Ansible certificate modules
-
-- name: ODFE | Get information on root CA certificate
- community.crypto.x509_certificate_info:
- # 'pemtrustedcas_filepath' is a relative path
- path: "{{ ('/etc/elasticsearch', existing_config.main['opendistro_security.ssl.transport.pemtrustedcas_filepath']) | path_join }}"
- register: _root_ca_info
-
-- name: ODFE | Check if demo or Epiphany certificates are in use # self-signed
- set_fact:
- _is_demo_cert_in_use: "{{ 'True' if _root_ca_info.subject.commonName == 'Example Com Inc. Root CA' else 'False' }}"
- _is_epiphany_cert_in_use: "{{ 'True' if _root_ca_info.subject.commonName == 'Epiphany Managed ODFE Root CA' else 'False' }}"
-
-# For custom admin cert (non-demo and non-Epiphany), we use workaround (upgrade_config.custom_admin_certificate).
-# The workaround should be replaced after implementing task #2127.
-- name: ODFE | Set API access facts
- set_fact:
- es_api:
- cert_path: "{{ _cert_path[_cert_type] }}"
- cert_type: "{{ _cert_type }}"
- key_path: "{{ _key_path[_cert_type] }}"
- url: https://{{ es_host }}:{{ es_http_port }}
- vars:
- _cert_type: >-
- {{ 'demo' if (_is_demo_cert_in_use) else
- 'Epiphany' if (_is_epiphany_cert_in_use) else
- 'custom' }}
- _cert_path:
- custom: "{{ lookup('vars', current_group_name).upgrade_config.custom_admin_certificate.cert_path }}" # defaults are not available via hostvars
- demo: "{{ (certificates.dirs.certs, certificates.files.demo.admin.cert) | path_join }}"
- Epiphany: "{{ (certificates.dirs.certs, certificates.files.admin.cert.filename) | path_join }}"
- _key_path:
- custom: "{{ lookup('vars', current_group_name).upgrade_config.custom_admin_certificate.key_path }}"
- demo: "{{ (certificates.dirs.certs, certificates.files.demo.admin.key) | path_join }}"
- Epiphany: "{{ (certificates.dirs.certs, certificates.files.admin.key.filename) | path_join }}"
-
-- include_tasks: utils/assert-cert-files-exist.yml
-
-# =================================================================================================
-# FLOW
-# -------------------------------------------------------------------------------------------------
-# NOTE: For clustered nodes it's recommended to disable shard allocation for the cluster before restarting a node (https://www.elastic.co/guide/en/elasticsearch/reference/current/restart-cluster.html#restart-cluster-rolling)
-#
-# if cert_type == 'demo':
-# Test API access
-# Genereate Epiphany self-signed certs
-# Save cluster status to file
-# Run certificates migration procedure for all nodes when 'es_clustered is true'
-# // Subtasks of the migration procedure:
-# Test API access
-# Update API related facts to use Epiphany admin certificate instead of demo
-# if cert_type == 'Epiphany':
-# Genereate Epiphany self-signed certs - to re-new certs if expiration date differs
-# Test API access
-# Save cluster status to file
-# if cert_type == 'custom':
-# Test API access
-# Save cluster status to file
-# Run upgrade (removes known demo certificate files)
-# if cert_type == 'Epiphany':
-# Remove dual root CA file (created as part of the migration, needed until all nodes are upgraded)
-# =================================================================================================
-
-# Test API access (demo or custom certs)
-- include_tasks: utils/assert-api-access.yml
- when: es_api.cert_type in ['demo', 'custom']
- vars:
- _fail_msg:
- common: Test of accessing API with TLS authentication failed.
- custom: >-
- It looks like you use custom certificates.
- Please refer to 'Open Distro for Elasticsearch upgrade' section of How-To docs.
- demo: >-
- It looks like you use demo certificates but your configuration might be incorrect or unsupported.
- fail_msg: "{{ _fail_msg.common }} {{ _fail_msg[es_api.cert_type] }}"
-
-- name: Generate self-signed certificates
- include_role:
- name: opendistro_for_elasticsearch
- tasks_from: generate-certs
- when: es_api.cert_type != 'custom'
-
-# Test API access (Epiphany certs)
-- include_tasks: utils/assert-api-access.yml
- when: es_api.cert_type == 'Epiphany'
- vars:
- fail_msg: >-
- Test of accessing API with TLS authentication failed.
- It looks like you use certificates generated by Epiphany but your configuration might be incorrect or an unexpected error occurred.
-
-# Save cluster health status before upgrade to file
-- include_tasks: utils/save-initial-cluster-status.yml
-
-# Run migration procedure - the first (parallel) part for clustered installation
-- include_tasks: migrate-from-demo-certs-01.yml
- when:
- - es_api.cert_type == 'demo'
- - es_clustered # rolling upgrade only for clustered installation
-
-# Run migration procedure for non-clustered installation
-- include_tasks: migrate-from-demo-certs-non-clustered.yml
- when:
- - es_api.cert_type == 'demo'
- - not es_clustered
-
-# Next tasks are run in serial mode in the next play
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml
deleted file mode 100644
index 237f34d4d2..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-elasticsearch-02.yml
+++ /dev/null
@@ -1,109 +0,0 @@
----
-# This file contains flow that cannot be run in parallel on multiple hosts because of rolling upgrades.
-# It's run after upgrade-elasticsearch-01.yml so some facts are already set.
-
-# Run migration procedure - the second (serial) part
-- include_tasks: opendistro_for_elasticsearch/migrate-from-demo-certs-02.yml
- when:
- - es_api.cert_type == 'demo'
- - es_clustered # rolling upgrade only for clustered installation
-
-- name: ODFE | Print API facts
- debug:
- var: es_api
- tags: [ never, debug ] # only runs when debug or never tag requested
-
-- name: ODFE | Prepare cluster for rolling upgrade
- include_tasks: opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml
- when: es_clustered
-
-- name: ODFE | Stop elasticsearch service
- systemd:
- name: elasticsearch
- state: stopped
-
-- name: ODFE | Include Elasticsearch installation tasks
- include_role:
- name: opendistro_for_elasticsearch
- tasks_from: install-es.yml
-
-- name: ODFE | Include Elasticsearch configuration tasks
- include_role:
- name: opendistro_for_elasticsearch
- tasks_from: configure-es.yml
- vars:
- _old: "{{ existing_config.main }}"
- # Keep the same data structure as for apply mode
- specification:
- jvm_options: "{{ existing_config.jvm_options }}"
- cluster_name: "{{ _old['cluster.name'] }}"
- clustered: "{{ 'True' if _old['discovery.seed_hosts'] | length > 1 else 'False' }}"
- paths:
- data: "{{ _old['path.data'] }}"
- repo: "{{ _old['path.repo'] | default('/var/lib/elasticsearch-snapshots') }}" # absent in Epiphany v0.6 thus we use default
- logs: "{{ _old['path.logs'] }}"
- opendistro_security:
- ssl:
- transport:
- enforce_hostname_verification: "{{ _old['opendistro_security.ssl.transport.enforce_hostname_verification'] }}"
-
- _demo_DNs:
- admin: "{{ opendistro_for_elasticsearch.certs_migration.demo_DNs.admin }}"
- node: "{{ opendistro_for_elasticsearch.certs_migration.demo_DNs.node }}"
- _dual_root_ca_filename: "{{ opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename }}"
- _epiphany_root_ca_filename: "{{ certificates.files.root_ca.cert.filename }}"
- _updated_existing_config:
- opendistro_security.authcz.admin_dn: "{{ _old['opendistro_security.authcz.admin_dn'] | reject('search', _demo_DNs.admin) }}"
- opendistro_security.nodes_dn: "{{ _old['opendistro_security.nodes_dn'] | default([]) | reject('search', _demo_DNs.node) }}"
- opendistro_security.ssl.http.pemtrustedcas_filepath: >-
- {{ _old['opendistro_security.ssl.http.pemtrustedcas_filepath'] | replace(_dual_root_ca_filename, _epiphany_root_ca_filename) }}
- opendistro_security.ssl.transport.pemtrustedcas_filepath: >-
- {{ _old['opendistro_security.ssl.transport.pemtrustedcas_filepath'] | replace(_dual_root_ca_filename, _epiphany_root_ca_filename) }}
-
- http.port: "{{ _old['http.port'] | default(odfe_defaults.ports.http) }}"
- transport.port: "{{ _old['transport.port'] | default(odfe_defaults.ports.transport) }}"
-
- existing_es_config: "{{ _old | combine(_updated_existing_config) }}"
-
-- name: ODFE | Include upgrade plugins tasks
- include_tasks: opendistro_for_elasticsearch/upgrade-plugins.yml
-
-# Restart elasticsearch service (unconditionally to ensure this task is not skipped in case of rerunning after interruption)
-- include_tasks: opendistro_for_elasticsearch/utils/restart-node.yml
- vars:
- daemon_reload: true # opendistro-performance-analyzer provides opendistro-performance-analyzer.service
- target_inventory_hostname: "{{ inventory_hostname }}"
-
-# Post-upgrade tasks
-
-- name: Re-enable shard allocation
- when: es_clustered
- block:
- - include_tasks: opendistro_for_elasticsearch/utils/enable-shard-allocation.yml
-
- - include_tasks: opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml
-
-# Read cluster health status from before the upgrade
-- name: Load upgrade state file
- slurp:
- src: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
- register: slurp_upgrade_state_file
-
-# Verify cluster status
-- include_tasks: opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml
- when: not es_clustered or
- (es_clustered and inventory_hostname == ansible_play_hosts_all[-1]) # for 'green' status at least 2 nodes must be already upgraded
- vars:
- initial_status: "{{ (slurp_upgrade_state_file.content | b64decode | from_json)['status'] }}"
- expected_status: "{{ [ initial_status, 'green'] | unique }}"
-
-- name: ODFE | Remove dual root CA temporary file
- file:
- path: "{{ (certificates.dirs.certs, opendistro_for_elasticsearch.certs_migration.dual_root_ca.filename) | path_join }}"
- state: absent
- when: es_api.cert_type == 'Epiphany'
-
-- name: ODFE | Remove upgrade state file
- file:
- path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
- state: absent
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml b/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml
deleted file mode 100644
index 80e34e6382..0000000000
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/upgrade-plugins.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: ODFE plugins | Assert that opendistro-* packages are installed
- assert:
- that: ansible_facts.packages['{{ item }}'] is defined
- fail_msg: "Missing package to upgrade: {{ item }}"
- quiet: true
- loop:
- - opendistro-alerting
- - opendistro-index-management
- - opendistro-job-scheduler
- - opendistro-performance-analyzer
- - opendistro-security
- - opendistro-sql
-
-- name: ODFE plugins | Upgrade opendistro-* packages
- include_role:
- name: opendistro_for_elasticsearch
- tasks_from: install-opendistro.yml
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch.yml
new file mode 100644
index 0000000000..ecff2dcec3
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch.yml
@@ -0,0 +1,39 @@
+---
+- name: OpenSearch | Get information about installed packages as facts
+ package_facts:
+ manager: auto
+ when: ansible_facts.packages is undefined
+
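+# The migration below runs only when an ODFE installation is detected ('elasticsearch-oss' package present).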
+- name: OpenSearch | Run migration from ODFE
+ when:
+ - ansible_facts.packages['elasticsearch-oss'] is defined
+ block:
+ - name: OpenSearch | Include defaults from OpenSearch role
+ include_vars:
+ name: opensearch_defaults
+ file: roles/opensearch/defaults/main.yml
+
+ - name: OpenSearch | Include vars from OpenSearch role
+ include_vars:
+ name: opensearch_variables
+ file: roles/opensearch/vars/main.yml
+
+ - name: OpenSearch | Run pre ODFE migration tasks
+ include_role:
+ name: upgrade
+ tasks_from: opensearch/pre-migrate
+
+ - name: OpenSearch | Run ODFE migration tasks
+ include_role:
+ name: upgrade
+ tasks_from: opensearch/migrate-odfe
+
+ - name: OpenSearch | Run Kibana migration tasks
+ include_role:
+ name: upgrade
+ tasks_from: opensearch/migrate-kibana
+
+ - name: OpenSearch | Cleanup
+ include_role:
+ name: upgrade
+ tasks_from: opensearch/cleanup
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/cleanup.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/cleanup.yml
new file mode 100644
index 0000000000..6401d689bc
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/cleanup.yml
@@ -0,0 +1,24 @@
+---
+- name: OpenSearch | Get information about installed packages as facts
+ package_facts:
+ manager: auto
+ when: ansible_facts.packages is undefined
+
+- name: Remove Kibana package
+ when: ansible_facts.packages['kibana'] is defined
+ package:
+ name: kibana
+ state: absent
+
+- name: Remove Elasticsearch package
+ when: ansible_facts.packages['elasticsearch-oss'] is defined
+ package:
+ name: elasticsearch-oss
+ state: absent
+
+# All other ODFE plugins are removed as dependencies of the packages above
+- name: Remove ODFE Kibana plugin
+ when: ansible_facts.packages['opendistroforelasticsearch-kibana'] is defined
+ package:
+ name: opendistroforelasticsearch-kibana
+ state: absent
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml
new file mode 100644
index 0000000000..77f755d64d
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-kibana.yml
@@ -0,0 +1,111 @@
+---
+- name: Kibana migration | Load defaults from OpenSearch Dashboards role
+ include_vars:
+ name: os_dashboards_defaults
+ file: roles/opensearch_dashboards/defaults/main.yml
+
+- name: Kibana migration | Load vars from OpenSearch Dashboards role
+ include_vars:
+ name: os_dashboards_variables
+ file: roles/opensearch_dashboards/vars/main.yml
+
+- name: Kibana migration | Download OpenSearch Dashboards binary
+ include_role:
+ name: download
+ tasks_from: download_file
+ vars:
+ file_name: >-
+ {{ os_dashboards_defaults.file_name_version.opensearch_dashboards[ansible_architecture] }}
+
+- name: Kibana migration | Create OpenSearch Dashboards OS group
+ group:
+ name: "{{ os_dashboards_variables.specification.dashboards_os_group }}"
+ state: present
+
+- name: Kibana migration | Create OpenSearch Dashboards OS user
+ user:
+ name: "{{ os_dashboards_variables.specification.dashboards_os_user }}"
+ state: present
+ shell: /bin/bash
+ group: "{{ os_dashboards_variables.specification.dashboards_os_group }}"
+ home: "{{ os_dashboards_variables.specification.paths.dashboards_home }}"
+ create_home: false
+
+- name: Kibana migration | Create OpenSearch Dashboards directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ os_dashboards_variables.specification.dashboards_os_user }}"
+ group: "{{ os_dashboards_variables.specification.dashboards_os_group }}"
+ mode: ug=rwx,o=rx
+ loop:
+ - "{{ os_dashboards_variables.specification.paths.dashboards_log_dir }}"
+ - "{{ os_dashboards_variables.specification.paths.dashboards_home }}"
+
+- name: Kibana migration | Extract the tar file
+ unarchive:
+ src: "{{ download_directory }}/{{ os_dashboards_defaults.file_name_version.opensearch_dashboards[ansible_architecture] }}"
+ dest: "{{ os_dashboards_variables.specification.paths.dashboards_home }}"
+ owner: "{{ os_dashboards_variables.specification.dashboards_os_user }}"
+ group: "{{ os_dashboards_variables.specification.dashboards_os_group }}"
+ remote_src: true
+ extra_opts:
+ - --strip-components=1
+
+- name: Kibana migration | Clone Kibana settings
+ copy:
+ src: /etc/kibana/kibana.yml
+ dest: "{{ os_dashboards_variables.specification.paths.dashboards_conf_dir }}/opensearch_dashboards.yml"
+ remote_src: true
+ owner: "{{ os_dashboards_variables.specification.dashboards_os_user }}"
+ group: "{{ os_dashboards_variables.specification.dashboards_os_group }}"
+ mode: ug=rw,o=
+
+- name: Kibana migration | Porting Kibana settings to OpenSearch Dashboards
+ replace:
+ path: "{{ os_dashboards_variables.specification.paths.dashboards_conf_dir }}/opensearch_dashboards.yml"
+ regexp: "{{ item.1 }}"
+ replace: "{{ item.2 }}"
+ loop:
+ - { 1: "elasticsearch", 2: "opensearch" }
+ - { 1: "/kibana", 2: "/opensearch-dashboards" }
+ - { 1: "opendistro_security", 2: "opensearch_security" }
+ # OpenSearch Dashboards reports these 3 Kibana settings as unrecognized
+ - { 1: "newsfeed.enabled", 2: "#newsfeed.enabled" }
+ - { 1: "telemetry.optIn", 2: "#telemetry.optIn" }
+ - { 1: "telemetry.enabled", 2: "#telemetry.enabled" }
+
+- name: Kibana migration | Create OpenSearch Dashboards service
+ template:
+ src: roles/opensearch_dashboards/templates/opensearch-dashboards.service.j2
+ dest: /etc/systemd/system/opensearch-dashboards.service
+ mode: u=rw,go=r
+ vars:
+ specification: "{{ os_dashboards_variables.specification }}"
+
+- name: Kibana migration | Stop Kibana service
+ systemd:
+ name: kibana
+ enabled: false
+ state: stopped
+
+- name: Kibana migration | Ensure OpenSearch Dashboards service is started
+ service:
+ name: opensearch-dashboards
+ state: started
+ enabled: true
+
+- name: Kibana migration | Get all the installed dashboards plugins
+ command: "{{ os_dashboards_variables.specification.paths.dashboards_plugin_bin_path }} list"
+ become: false # This command cannot be run as the root user
+ register: list_plugins
+
+- name: Kibana migration | Show all the installed dashboards plugins
+ debug:
+ msg: "{{ list_plugins.stdout }}"
+
+- name: Kibana migration | Prevent Filebeat API access problem # Workaround for https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656
+ replace:
+ path: /etc/filebeat/filebeat.yml
+ regexp: "setup.dashboards.enabled: true"
+ replace: "setup.dashboards.enabled: false"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml
new file mode 100644
index 0000000000..bceb94c888
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe-serial.yml
@@ -0,0 +1,114 @@
+---
+# The tasks below need to be run serially (node by node)
+- name: ODFE migration | Stop Elasticsearch service
+ systemd:
+ name: elasticsearch
+ enabled: false
+ state: stopped
+ register: elasticsearch_state
+
+- name: ODFE migration | Install OpenSearch binaries
+ include_tasks: roles/opensearch/tasks/install-opensearch.yml
+ vars:
+ specification: "{{ opensearch_variables.specification }}"
+ file_name_version: "{{ opensearch_defaults.file_name_version }}"
+
+- name: ODFE migration | Copy Elasticsearch directories to OpenSearch directories
+ copy:
+ src: "{{ item.1 }}"
+ dest: "{{ item.2 }}"
+ remote_src: true
+ owner: "{{ opensearch_variables.specification.opensearch_os_user }}"
+ group: "{{ opensearch_variables.specification.opensearch_os_group }}"
+ mode: u=rw,go=r
+ loop:
+ - {
+ 1: "/var/lib/elasticsearch-snapshots/",
+ 2: "{{ opensearch_variables.specification.paths.opensearch_snapshots_dir }}/",
+ }
+ - {
+ 1: "/var/lib/elasticsearch/",
+ 2: "{{ opensearch_variables.specification.paths.opensearch_data_dir }}",
+ }
+
+- name: ODFE migration | Prepare a list of Elasticsearch certs and keys
+ find:
+ paths: "/etc/elasticsearch/"
+ patterns: "*pem"
+ register: pem_files
+
+- name: ODFE migration | Copy a list of certs and keys to OpenSearch directories
+ copy:
+ src: "{{ item.path }}"
+ dest: "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}"
+ remote_src: true
+ with_items: "{{ pem_files.files }}"
+
+- name: ODFE migration | Clone JVM configuration file
+ copy:
+ src: /etc/elasticsearch/jvm.options
+ dest: "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}/jvm.options"
+ remote_src: true
+ owner: root
+ group: opensearch
+ mode: ug=rw,o=
+ backup: true
+
+- name: ODFE migration | Update JVM configuration file
+ replace:
+ path: "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}/jvm.options"
+ regexp: "{{ item.1 }}"
+ replace: "{{ item.2 }}"
+ loop:
+ - { 1: 'elasticsearch', 2: 'opensearch' }
+ - { 1: '\${ES_TMPDIR}', 2: '${OPENSEARCH_TMPDIR}' }
+
+- name: ODFE migration | Clone main configuration file
+ copy:
+ src: /etc/elasticsearch/elasticsearch.yml
+ dest: "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}/opensearch.yml"
+ remote_src: true
+ owner: root
+ group: opensearch
+ mode: ug=rw,o=
+ backup: true
+
+- name: ODFE migration | Update main configuration file
+ replace:
+ path: "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}/opensearch.yml"
+ regexp: "{{ item.1 }}"
+ replace: "{{ item.2 }}"
+ loop:
+ - { 1: "elasticsearch", 2: "opensearch" }
+ - { 1: "EpiphanyElastic", 2: "EpiphanyOpensearch" }
+ - { 1: "opendistro_security.", 2: "plugins.security." }
+
+- name: ODFE migration | Set fact with batch_metrics_enabled.conf path
+ set_fact:
+ _batch_metrics_enabled: >-
+ /usr/share/elasticsearch/data/batch_metrics_enabled.conf
+
+- name: ODFE migration | Check if batch_metrics_enabled.conf exists
+ stat:
+ path: "{{ _batch_metrics_enabled }}"
+ register: batch_metrics_enabled
+
+# TODO: make this configurable
+- name: ODFE migration | Create batch_metrics_enabled.conf
+ copy:
+ dest: "{{ _batch_metrics_enabled }}"
+ content: "false"
+ when: not batch_metrics_enabled.stat.exists
+
+- name: ODFE migration | Start OpenSearch service
+ systemd:
+ name: opensearch
+ state: started
+ enabled: true
+ register: restart_opensearch
+
+- name: ODFE migration | Wait for OpenSearch to be reachable
+ wait_for:
+ port: "{{ opensearch_defaults.ports.http }}"
+ host: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"
+ sleep: 6
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml
new file mode 100644
index 0000000000..cb21e7f396
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/migrate-odfe.yml
@@ -0,0 +1,203 @@
+---
+- name: OpenSearch | Get information about installed packages as facts
+ package_facts:
+ manager: auto
+ when: ansible_facts.packages is undefined
+
+- name: OpenSearch | Print Elasticsearch and OpenSearch versions
+ debug:
+ msg:
+ - "Elasticsearch version currently installed: {{ ansible_facts.packages['elasticsearch-oss'][0].version }}"
+ - "OpenSearch version to be installed: {{ opensearch_defaults.file_name_version.opensearch[ansible_architecture].split('-')[1] }}"
+
+- name: ODFE migration | Ensure elasticsearch cluster is up and running
+ systemd:
+ name: elasticsearch
+ enabled: true
+ state: started
+ register: elasticsearch_state
+
+- name: ODFE migration | Set existing_config facts
+ include_tasks: opensearch/utils/get-config-from-files.yml
+
+- name: ODFE migration | Set common facts
+ set_fact:
+ es_host: "{{ existing_config.main['network.host'] | default('_local_') }}"
+ es_http_port: "{{ existing_config.main['http.port'] | default(opensearch_defaults.ports.http) }}"
+ es_transport_port: "{{ existing_config.main['transport.port'] | default(opensearch_defaults.ports.transport) }}"
+ es_clustered: "{{ (existing_config.main['discovery.seed_hosts'] | length > 1) | ternary(True, False) }}"
+ es_node_name: "{{ existing_config.main['node.name'] }}"
+ override_main_response_version_exist:
+ - "{{ existing_config.main['compatibility.override_main_response_version'] | default(false) }}"
+
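+# Rolling-restart preparation: shard allocation is disabled cluster-wide and a flush is attempted
+# before nodes are restarted; allocation is re-enabled at the end of this file.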
+- name: ODFE migration | Prepare ODFE to OpenSearch migration
+ include_tasks:
+ file: opensearch/utils/prepare-cluster-for-node-restart.yml
+ apply:
+ delegate_to: "{{ target_inventory_hostname }}"
+ delegate_facts: true
+ loop: "{{ groups.logging | default([]) }}"
+ loop_control:
+ loop_var: target_inventory_hostname
+ vars:
+ es_api:
+ cert_type: Epiphany
+ cert_path: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ key_path: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ url: https://{{ es_host }}:{{ es_http_port }}
+ fail_msg: API access test failed
+
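+# Special flow: 'run_once' combined with 'loop' and 'delegate_to' executes the included tasks
+# node by node (serially), even though this task file is included for the whole group.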
+- name: ODFE migration | Run core migration tasks individually on each node
+ include_tasks:
+ file: opensearch/migrate-odfe-serial.yml
+ apply:
+ delegate_to: "{{ target_hostname }}"
+ delegate_facts: true
+ loop: "{{ groups.logging | default([]) }}"
+ loop_control:
+ loop_var: target_hostname
+ run_once: true
+
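+# NOTE: the legacy '_opendistro/_security' REST path is used below; OpenSearch still accepts it
+# as a backwards-compatible alias of '_plugins/_security'.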
+- name: ODFE migration | Check if default admin user exists
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers/admin"
+ method: GET
+ # 404 is accepted because the admin user may have been removed manually.
+ status_code: [200, 404]
+ validate_certs: false
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ register: admin_check_response
+ until: admin_check_response is success
+ retries: 60
+ delay: 1
+ run_once: true
+
+- name: ODFE migration | Set OpenSearch admin password
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers"
+ method: PATCH
+ status_code: [200]
+ body:
+ - op: "replace"
+ path: "/admin"
+ value:
+ password: "{{ opensearch_variables.specification.admin_password }}"
+ reserved: "true"
+ backend_roles:
+ - "admin"
+ description: "Admin user"
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ body_format: json
+ validate_certs: false
+ register: uri_response
+ until: uri_response is success
+ retries: 5
+ delay: 1
+ run_once: true
+ when: admin_check_response.status == 200
+
+- name: ODFE migration | Check if kibanaserver user exists
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers/kibanaserver"
+ method: GET
+ # 404 is accepted because the kibanaserver user may have been removed manually.
+ status_code: [200, 404]
+ validate_certs: false
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ register: kibanaserver_check_response
+ until: kibanaserver_check_response is success
+ retries: 60
+ delay: 1
+ run_once: true
+
+- name: ODFE migration | Set kibanaserver user password
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers"
+ method: PATCH
+ status_code: [200]
+ body:
+ - op: "replace"
+ path: "/kibanaserver"
+ value:
+ password: "{{ opensearch_variables.specification.kibanaserver_password }}"
+ reserved: "true"
+ description: "kibanaserver user"
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ body_format: json
+ validate_certs: false
+ register: uri_response
+ until: uri_response is success
+ retries: 5
+ delay: 1
+ run_once: true
+ when: kibanaserver_check_response.status == 200
+
+- name: ODFE migration | Check if logstash user exists
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers/logstash"
+ method: GET
+ # 404 is accepted because the logstash user may have been removed manually.
+ status_code: [200, 404]
+ validate_certs: false
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ register: logstash_check_response
+ until: logstash_check_response is success
+ retries: 60
+ delay: 1
+ run_once: true
+
+- name: ODFE migration | Set logstash user password
+ uri:
+ url: "https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_opendistro/_security/api/internalusers"
+ method: PATCH
+ status_code: [200]
+ body:
+ - op: "replace"
+ path: "/logstash"
+ value:
+ password: "{{ opensearch_variables.specification.logstash_password }}"
+ reserved: "true"
+ backend_roles:
+ - "logstash"
+ description: "Logstash user"
+ client_cert: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ client_key: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ body_format: json
+ validate_certs: false
+ register: uri_response
+ until: uri_response is success
+ retries: 5
+ delay: 1
+ run_once: true
+ when: logstash_check_response.status == 200
+
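+# Plain curl with basic auth, which also verifies that the admin password set above works.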
+- name: ODFE migration | Check the OpenSearch status
+ command: curl https://{{ inventory_hostname }}:{{ opensearch_defaults.ports.http }}/_cluster/health?pretty -u 'admin:{{ opensearch_variables.specification.admin_password }}' -k
+ register: opensearch_status
+
+- name: ODFE migration | Show the OpenSearch status
+ debug:
+ msg: "{{ opensearch_status.stdout }}"
+ failed_when: "'number_of_nodes' not in opensearch_status.stdout"
+
+- name: ODFE migration | Re-enable shard allocation for the cluster
+ include_tasks:
+ file: opensearch/utils/enable-shard-allocation.yml
+ apply:
+ delegate_to: "{{ target_inventory_hostname }}"
+ delegate_facts: true
+ loop: "{{ ansible_play_hosts_all }}"
+ loop_control:
+ loop_var: target_inventory_hostname
+ vars:
+ es_api:
+ cert_type: Epiphany
+ cert_path: "{{ opensearch.upgrade_config.custom_admin_certificate.cert_path }}"
+ key_path: "{{ opensearch.upgrade_config.custom_admin_certificate.key_path }}"
+ url: https://{{ es_host }}:{{ es_http_port }}
+ fail_msg: API access test failed.
diff --git a/ansible/playbooks/roles/upgrade/tasks/opensearch/pre-migrate.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/pre-migrate.yml
new file mode 100644
index 0000000000..2f349f3cdf
--- /dev/null
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/pre-migrate.yml
@@ -0,0 +1,27 @@
+---
+- name: OpenSearch | Ensure OpenSearch service OS group exists
+ group:
+ name: "{{ opensearch_variables.specification.opensearch_os_group }}"
+ state: present
+
+- name: OpenSearch | Ensure OpenSearch service OS user exists
+ user:
+ name: "{{ opensearch_variables.specification.opensearch_os_user }}"
+ state: present
+ shell: /bin/bash
+ groups: "{{ opensearch_variables.specification.opensearch_os_group }}"
+ home: "{{ opensearch_variables.specification.paths.opensearch_home }}"
+ create_home: true
+
+- name: OpenSearch | Ensure directory structure exists
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ opensearch_variables.specification.opensearch_os_user }}"
+ group: "{{ opensearch_variables.specification.opensearch_os_group }}"
+ mode: u=rwx,go=rx
+ loop:
+ - "{{ opensearch_variables.specification.paths.opensearch_log_dir }}"
+ - "{{ opensearch_variables.specification.paths.opensearch_conf_dir }}"
+ - "{{ opensearch_variables.specification.paths.opensearch_data_dir }}"
+ - "{{ opensearch_defaults.certificates.dirs.certs }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml
similarity index 85%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml
index b9d36e1d9f..c99c75ad72 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-api-access.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-api-access.yml
@@ -1,5 +1,5 @@
---
-- name: ODFE | Assert input parameters
+- name: OpenSearch | Assert input parameters
assert:
that:
- es_api.cert_path is defined
@@ -13,7 +13,7 @@
# Sets 'test_api_access'
- include_tasks: test-api-access.yml
-- name: ODFE | Assert API access
+- name: OpenSearch | Assert API access
assert:
that: test_api_access.status == 200
fail_msg:
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml
similarity index 89%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml
index a4ad4f4f60..8166ad52af 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/assert-cert-files-exist.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/assert-cert-files-exist.yml
@@ -1,5 +1,5 @@
---
-- name: ODFE | Assert input parameters
+- name: OpenSearch | Assert input parameters
assert:
that:
- es_api.cert_path is defined
@@ -8,7 +8,7 @@
- es_api.key_path is defined
quiet: true
-- name: ODFE | Get info on files
+- name: OpenSearch | Get info on files
stat:
path: "{{ item }}"
get_attributes: false
@@ -20,7 +20,7 @@
- "{{ es_api.key_path }}"
# Specific case for custom certificates (we don't know the paths so they have to be specified manually)
-- name: ODFE | Assert files exist
+- name: OpenSearch | Assert files exist
assert:
that: stat_result.stat.exists
fail_msg: "{{ _custom_cert_fail_msg if (es_api.cert_type == 'custom') else _common_fail_msg }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml
similarity index 68%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml
index 01946b94f6..316078d694 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/create-dual-cert-file.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/create-dual-cert-file.yml
@@ -3,16 +3,16 @@
# - certs_to_concatenate
# - target_path
-- name: ODFE | Read certificates to concatenate
+- name: OpenSearch | Read certificates to concatenate
slurp:
src: "{{ item }}"
register: _files
loop: "{{ certs_to_concatenate }}"
-- name: ODFE | Create dual root CA transitional file for migration
+- name: OpenSearch | Create dual root CA transitional file for migration
copy:
dest: "{{ target_path }}"
content: "{{ _files.results | map(attribute='content') | map('b64decode') | join('') }}"
mode: u=rw,g=r,o=
owner: root
- group: elasticsearch
+ group: opensearch
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml
similarity index 88%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml
index 8394d69fa2..4978f10a5a 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/enable-shard-allocation.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/enable-shard-allocation.yml
@@ -4,7 +4,7 @@
# - es_api.cert_path
# - es_api.key_path
-- name: ODFE | Enable shard allocation for the cluster
+- name: OpenSearch | Enable shard allocation for the cluster
uri:
url: "{{ es_api.url }}/_cluster/settings"
method: PUT
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml
similarity index 89%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml
index 9c0079f468..fae3164ded 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-cluster-health.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-cluster-health.yml
@@ -4,7 +4,7 @@
# - es_api.cert_path
# - es_api.key_path
-- name: ODFE | Get cluster health
+- name: OpenSearch | Get cluster health
uri:
url: "{{ es_api.url }}/_cluster/health"
method: GET
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml
similarity index 69%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml
index 814087368c..8678908038 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/get-config-from-files.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/get-config-from-files.yml
@@ -1,17 +1,17 @@
---
# Sets facts on existing configuration
-- name: ODFE | Load /etc/elasticsearch/elasticsearch.yml
+- name: OpenSearch | Load /etc/elasticsearch/elasticsearch.yml
slurp:
src: /etc/elasticsearch/elasticsearch.yml
register: _elasticsearch_yml
-- name: ODFE | Get Xmx value from /etc/elasticsearch/jvm.options
+- name: OpenSearch | Get Xmx value from /etc/elasticsearch/jvm.options
command: grep -oP '(?<=^-Xmx)\d+[kKmMgG]?' /etc/elasticsearch/jvm.options
register: _grep_xmx
changed_when: false
-- name: ODFE | Set existing configuration facts
+- name: OpenSearch | Set existing configuration facts
set_fact:
existing_config:
main: "{{ _elasticsearch_yml.content | b64decode | from_yaml }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml
similarity index 89%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml
index 34bebc59cb..514c282258 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/prepare-cluster-for-node-restart.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/prepare-cluster-for-node-restart.yml
@@ -11,12 +11,12 @@
module_defaults:
uri:
client_cert: "{{ es_api.cert_path }}"
- client_key: "{{ es_api.key_path }}"
+ client_key: "{{ es_api.key_path }}"
validate_certs: false
body_format: json
block:
# It's safe to run this task many times regardless of the state
- - name: ODFE | Disable shard allocation for the cluster
+ - name: OpenSearch | Disable shard allocation for the cluster
uri:
url: "{{ es_api.url }}/_cluster/settings"
method: PUT
@@ -35,7 +35,7 @@
# In epicli 0.7.x there is ES 7.3.2 but this step is optional.
- name: Handle flush failure
block:
- - name: ODFE | Perform a synced flush (optional step)
+ - name: OpenSearch | Perform a synced flush (optional step)
uri:
url: "{{ es_api.url }}/_flush"
method: POST
@@ -46,7 +46,7 @@
retries: 120
delay: 1
rescue:
- - name: ODFE | Print warning
+ - name: OpenSearch | Print warning
debug:
msg:
- "WARNING: flush command failed"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml
similarity index 74%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml
index c6348f7ee9..ee5c496756 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/restart-node.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/restart-node.yml
@@ -10,18 +10,18 @@
# - daemon_reload
# - skip_waiting_for_status
-- name: ODFE | Restart elasticsearch service
+- name: OpenSearch | Restart elasticsearch service
systemd:
- name: elasticsearch
+ name: opensearch
state: restarted
daemon_reload: "{{ daemon_reload | default(omit) }}"
-- name: ODFE | Wait for Elasticsearch transport port to become available
+- name: OpenSearch | Wait for Elasticsearch transport port to become available
wait_for:
port: "{{ es_transport_port }}"
host: "{{ hostvars[target_inventory_hostname].es_host }}"
-- name: ODFE | Wait for Elasticsearch http port to become available
+- name: OpenSearch | Wait for Elasticsearch http port to become available
wait_for:
port: "{{ es_http_port }}"
host: "{{ hostvars[target_inventory_hostname].es_host }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml
similarity index 58%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml
index 9050c7799a..cd6253396c 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/save-initial-cluster-status.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/save-initial-cluster-status.yml
@@ -1,7 +1,7 @@
---
-- name: ODFE | Get size of upgrade state file
+- name: OpenSearch | Get size of upgrade state file
stat:
- path: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
+ path: "{{ opensearch.upgrade_state_file_path }}"
get_attributes: false
get_checksum: false
get_mime: false
@@ -12,7 +12,7 @@
block:
- include_tasks: get-cluster-health.yml
- - name: ODFE | Save cluster health to upgrade state file
+ - name: OpenSearch | Save cluster health to upgrade state file
copy:
content: "{{ cluster_health.json }}"
- dest: "{{ opendistro_for_elasticsearch.upgrade_state_file_path }}"
+ dest: "{{ opensearch.upgrade_state_file_path }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml
similarity index 83%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml
index 8d8495e525..cb8e49d961 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/test-api-access.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/test-api-access.yml
@@ -5,7 +5,7 @@
# - es_api.key_path
# - es_api.url
-- name: ODFE | Test API access using {{ es_api.cert_type }} certificate
+- name: OpenSearch | Test API access using {{ es_api.cert_type }} certificate
uri:
client_cert: "{{ es_api.cert_path }}"
client_key: "{{ es_api.key_path }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml
similarity index 93%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml
index 496198a4a0..78615ea41c 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-cluster-status.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-cluster-status.yml
@@ -5,7 +5,7 @@
# - es_api.key_path
# - expected_status (type: list, e.g. [ 'green', 'yellow' ])
-- name: ODFE | Wait for '{{ expected_status | join("' or '") }}' cluster health status
+- name: OpenSearch | Wait for '{{ expected_status | join("' or '") }}' cluster health status
uri:
url: "{{ es_api.url }}/_cluster/health"
client_cert: "{{ es_api.cert_path }}"
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml
similarity index 88%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml
index fcb039654c..82bf3ef35c 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-node-to-join.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-node-to-join.yml
@@ -6,7 +6,7 @@
# - target_inventory_hostname
# - hostvars[target_inventory_hostname].es_node_name
-- name: ODFE | Wait for Elasticsearch node to join the cluster
+- name: OpenSearch | Wait for Elasticsearch node to join the cluster
uri:
url: "{{ es_api.url }}/_cat/nodes?h=name"
method: GET
diff --git a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml
similarity index 95%
rename from ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml
rename to ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml
index 0175d1b2d5..2517d57286 100644
--- a/ansible/playbooks/roles/upgrade/tasks/opendistro_for_elasticsearch/utils/wait-for-shard-allocation.yml
+++ b/ansible/playbooks/roles/upgrade/tasks/opensearch/utils/wait-for-shard-allocation.yml
@@ -4,7 +4,7 @@
# - es_api.cert_path
# - es_api.key_path
-- name: ODFE | Wait for the cluster to finish shard allocation
+- name: OpenSearch | Wait for the cluster to finish shard allocation
uri:
url: "{{ es_api.url }}/_cluster/health"
method: GET
diff --git a/ansible/playbooks/upgrade.yml b/ansible/playbooks/upgrade.yml
index 3c18ef5793..907a14e296 100644
--- a/ansible/playbooks/upgrade.yml
+++ b/ansible/playbooks/upgrade.yml
@@ -136,77 +136,32 @@
environment:
KUBECONFIG: "{{ kubeconfig.local }}"
+# Currently, the upgrade of opensearch/logging instances is disabled
# === logging ===
-# Some pre-upgrade tasks can be run in parallel (what saves time) while others must be run in serial (to support rolling upgrades).
-# Such a separation in Ansible can be applied only at play level thus we have two plays below.
-
-# play 1/2: pre-upgrade parallel tasks
-- hosts: logging
- become: true
- become_method: sudo
- tasks:
- - include_role:
- name: upgrade
- tasks_from: opendistro_for_elasticsearch-01
- when: "'logging' in upgrade_components or upgrade_components|length == 0"
- vars:
- current_group_name: logging
-
-# play 2/2: serial tasks
-- hosts: logging
- become: true
- become_method: sudo
- gather_facts: false # gathered by previous play
- serial: 1
- tasks:
- - include_role:
- name: upgrade
- tasks_from: opendistro_for_elasticsearch-02
- when: "'logging' in upgrade_components or upgrade_components|length == 0"
- vars:
- current_group_name: logging
-
-# === opendistro_for_elasticsearch ===
-
-# Some pre-upgrade tasks can be run in parallel (what saves time) while others must be run in serial (to support rolling upgrades).
-# Such a separation in Ansible can be applied only at play level thus we have two plays below.
-
-# play 1/2: parallel tasks
-- hosts: opendistro_for_elasticsearch
- become: true
- become_method: sudo
- tasks:
- - include_role:
- name: upgrade
- tasks_from: opendistro_for_elasticsearch-01
- when: "'opendistro_for_elasticsearch' in upgrade_components or upgrade_components|length == 0"
- vars:
- current_group_name: opendistro_for_elasticsearch
-
-# play 2/2: serial tasks
-- hosts: opendistro_for_elasticsearch
- become: true
- become_method: sudo
- gather_facts: false # gathered by previous play
- serial: 1
- tasks:
- - include_role:
- name: upgrade
- tasks_from: opendistro_for_elasticsearch-02
- when: "'opendistro_for_elasticsearch' in upgrade_components or upgrade_components|length == 0"
- vars:
- current_group_name: opendistro_for_elasticsearch
-
-- hosts: kibana
- become: true
- become_method: sudo
- serial: 1
- tasks:
- - import_role:
- name: upgrade
- tasks_from: kibana
- when: "'kibana' in upgrade_components or upgrade_components|length == 0"
+# - hosts: logging
+# become: true
+# become_method: sudo
+# tasks:
+# - include_role:
+# name: upgrade
+# tasks_from: opensearch
+# when: "'logging' in upgrade_components or upgrade_components|length == 0"
+# vars:
+# current_group_name: logging
+
+# === opensearch ===
+
+# - hosts: opensearch
+# become: true
+# become_method: sudo
+# tasks:
+# - include_role:
+# name: upgrade
+# tasks_from: opensearch
+# when: "'opensearch' in upgrade_components or upgrade_components|length == 0"
+# vars:
+# current_group_name: opensearch
- hosts: grafana
become: true
diff --git a/cli/epicli.py b/cli/epicli.py
index fbf55163f5..9f69a560d5 100644
--- a/cli/epicli.py
+++ b/cli/epicli.py
@@ -262,12 +262,12 @@ def upgrade_parser(subparsers):
'jmx_exporter',
'kafka',
'kafka_exporter',
- 'kibana',
+ 'opensearch_dashboards',
'kubernetes',
'load_balancer',
'logging',
'node_exporter',
- 'opendistro_for_elasticsearch',
+ 'opensearch',
'postgresql',
'postgres_exporter',
'prometheus',
diff --git a/cli/src/ansible/AnsibleVarsGenerator.py b/cli/src/ansible/AnsibleVarsGenerator.py
index d44fd8086a..63f7d55db3 100644
--- a/cli/src/ansible/AnsibleVarsGenerator.py
+++ b/cli/src/ansible/AnsibleVarsGenerator.py
@@ -73,7 +73,7 @@ def generate(self):
# are not compatible with the new ones, defaults are used for template processing
roles_with_defaults = [
'grafana', 'haproxy', 'image_registry', 'jmx_exporter', 'kafka', 'kafka_exporter',
- 'kibana', 'logging', 'node_exporter', 'postgres_exporter',
+ 'logging', 'node_exporter', 'opensearch', 'opensearch_dashboards', 'postgres_exporter',
'postgresql', 'prometheus', 'rabbitmq', 'repository'
]
# now lets add any external configs we want to load
diff --git a/docs/architecture/logical-view.md b/docs/architecture/logical-view.md
index 47d9acde34..ab3a65c922 100644
--- a/docs/architecture/logical-view.md
+++ b/docs/architecture/logical-view.md
@@ -51,14 +51,14 @@ Source | Purpose
/var/log/zookeeper/version-2/* | Zookeeper's logs
Containers | Kubernetes components that run in a container
-`Filebeat`, unlike `Grafana`, pushes data to database (`Elasticsearch`) instead of pulling them.
+`Filebeat`, unlike `Grafana`, pushes data to the database (`OpenSearch`) instead of pulling it.
[Read more](https://www.elastic.co/products/beats/filebeat) about `Filebeat`.
-### Elasticsearch
+### OpenSearch
-`Elasticsearch` is highly scalable and full-text search enabled analytics engine. Epiphany Platform uses it for storage and analysis of logs.
+`OpenSearch` is a highly scalable, full-text search enabled analytics engine. Epiphany Platform uses it for storage and analysis of logs.
-[Read more](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/index.html)
+[Read more](https://opensearch.org/docs/latest)
### Elasticsearch Curator
@@ -66,11 +66,11 @@ Containers | Kubernetes components that run in a container
[Read more](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.8/index.html)
-### Kibana
+### OpenSearch Dashboards
-`Kibana` like `Grafana` is used in Epiphany for visualization, in addition it has full text search capabilities. `Kibana` uses `Elasticsearch` as datasource for logs, it allows to create full text queries, dashboards and analytics that are performed on logs.
+`OpenSearch Dashboards`, like `Grafana`, is used in Epiphany for visualization. It uses `OpenSearch` as a datasource for logs and allows creating full-text queries, dashboards and analytics that are performed on logs.
-[Read more](https://www.elastic.co/products/kibana)
+[Read more](https://opensearch.org/docs/latest/dashboards/index/)
## Computing
diff --git a/docs/architecture/process-view.md b/docs/architecture/process-view.md
index 366bb2ee83..a124c7fd16 100644
--- a/docs/architecture/process-view.md
+++ b/docs/architecture/process-view.md
@@ -24,8 +24,8 @@ metrics from different kinds of exporters.
## Logging
-Epiphany uses `Elasticsearch` as key-value database with `Filebeat` for gathering logs and `Kibana` as user interface to write queries and analyze logs.
+Epiphany uses `OpenSearch` as a key-value database with `Filebeat` for gathering logs and `OpenSearch Dashboards` as the user interface to write queries and analyze logs.
![Logging process view](diagrams/process-view/logging-process-view.svg)
-`Filebeat` gathers OS and application logs and ships them to `Elasticsearch`. Queries from `Kibana` are run against `Elasticsearch` key-value database.
\ No newline at end of file
+`Filebeat` gathers OS and application logs and ships them to `OpenSearch`. Queries from `OpenSearch Dashboards` are run against the `OpenSearch` key-value database.
\ No newline at end of file
diff --git a/docs/changelogs/CHANGELOG-0.5.md b/docs/changelogs/CHANGELOG-0.5.md
index 9f1a8f9e36..9acb3929a2 100644
--- a/docs/changelogs/CHANGELOG-0.5.md
+++ b/docs/changelogs/CHANGELOG-0.5.md
@@ -82,7 +82,7 @@
- [#381](https://github.com/epiphany-platform/epiphany/issues/381) - Add AWS EC2 Root Volume encryption
- [#782](https://github.com/epiphany-platform/epiphany/issues/781) - All disks encryption documentation - AWS
- [#782](https://github.com/epiphany-platform/epiphany/issues/782) - All disks encryption documentation - Azure
-- [#784](https://github.com/epiphany-platform/epiphany/issues/784) - Switch to Open Distro for Elasticsearch
+- [#784](https://github.com/epiphany-platform/epiphany/issues/784) - Switch to Open Distro for ElasticSearch
- [Data storage](/docs/home/howto/DATABASES.md#how-to-start-working-with-opendistro-for-elasticsearch)
- [Centralized logging](/docs/home/howto/LOGGING.md#centralized-logging-setup)
diff --git a/docs/changelogs/CHANGELOG-2.0.md b/docs/changelogs/CHANGELOG-2.0.md
index 54acab99aa..4111b0ea03 100644
--- a/docs/changelogs/CHANGELOG-2.0.md
+++ b/docs/changelogs/CHANGELOG-2.0.md
@@ -9,8 +9,8 @@
### Updated
- [#3080](https://github.com/epiphany-platform/epiphany/issues/3080) - update Filebeat to the latest compatible version with OpenSearch
-
- [#2982](https://github.com/epiphany-platform/epiphany/issues/2982) - Using AKS and EKS Terraform configuration directly with Epiphany.
+- [#2870](https://github.com/epiphany-platform/epiphany/issues/2870) - Open Distro for Elasticsearch replaced by OpenSearch
### Deprecated
diff --git a/docs/home/ARM.md b/docs/home/ARM.md
index 1fc7b36ad1..896112e0c1 100644
--- a/docs/home/ARM.md
+++ b/docs/home/ARM.md
@@ -33,7 +33,7 @@ Besides making sure that the selected providers, operating systems, components a
| monitoring | :heavy_check_mark: | :x: | :x: |
| load_balancer | :heavy_check_mark: | :x: | :x: |
| postgresql | :heavy_check_mark: | :x: | :x: |
-| opendistro_for_elasticsearch | :heavy_check_mark: | :x: | :x: |
+| opensearch | :heavy_check_mark: | :x: | :x: |
| single_machine | :heavy_check_mark: | :x: | :x: |
***Notes***
@@ -96,9 +96,9 @@ specification:
rabbitmq:
count: 2
machine: rabbitmq-machine-arm
- opendistro_for_elasticsearch:
+ opensearch:
count: 1
- machine: opendistro-machine-arm
+ machine: opensearch-machine-arm
repository:
count: 1
machine: repository-machine-arm
@@ -168,7 +168,7 @@ specification:
ip: x.x.x.x
---
kind: infrastructure/virtual-machine
-name: opendistro-machine-arm
+name: opensearch-machine-arm
provider: any
based_on: logging-machine
specification:
@@ -319,9 +319,9 @@ specification:
machine: rabbitmq-machine-arm
subnets:
- address_pool: 10.1.8.0/24
- opendistro_for_elasticsearch:
+ opensearch:
count: 1
- machine: opendistro-machine-arm
+ machine: opensearch-machine-arm
subnets:
- address_pool: 10.1.10.0/24
repository:
@@ -394,7 +394,7 @@ specification:
size: a1.medium
---
kind: infrastructure/virtual-machine
-name: opendistro-machine-arm
+name: opensearch-machine-arm
provider: aws
based_on: logging-machine
specification:
diff --git a/docs/home/COMPONENTS.md b/docs/home/COMPONENTS.md
index e062dd103f..09eeb1db4c 100644
--- a/docs/home/COMPONENTS.md
+++ b/docs/home/COMPONENTS.md
@@ -19,10 +19,6 @@ Note that versions are default versions and can be changed in certain cases thro
| RabbitMQ | 3.8.9 | https://github.com/rabbitmq/rabbitmq-server | [Mozilla Public License](https://www.mozilla.org/en-US/MPL/) |
| Docker CE | 20.10.8 | https://docs.docker.com/engine/release-notes/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
| KeyCloak | 14.0.0 | https://github.com/keycloak/keycloak | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
-| Elasticsearch OSS | 7.10.2 | https://github.com/elastic/elasticsearch | https://github.com/elastic/elasticsearch/blob/master/LICENSE.txt |
-| Elasticsearch Curator OSS | 5.8.3 | https://github.com/elastic/curator | https://github.com/elastic/curator/blob/master/LICENSE.txt |
-| Opendistro for Elasticsearch | 1.13.x | https://opendistro.github.io/for-elasticsearch/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
-| Opendistro for Elasticsearch Kibana | 1.13.1 | https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/ | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
| Filebeat | 7.12.1 | https://github.com/elastic/beats | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
| Filebeat Helm Chart | 7.12.1 | https://github.com/elastic/helm-charts | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
| Prometheus | 2.31.1 | https://github.com/prometheus/prometheus | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
@@ -181,6 +177,8 @@ Note that versions are default versions and can be changed in certain cases thro
| msrest | 0.6.21 | https://github.com/Azure/msrest-for-python | [MIT License](https://api.github.com/repos/azure/msrest-for-python/license) |
| msrestazure | 0.6.4 | https://github.com/Azure/msrestazure-for-python | [MIT License](https://api.github.com/repos/azure/msrestazure-for-python/license) |
| oauthlib | 3.1.1 | https://github.com/oauthlib/oauthlib | [BSD 3-Clause "New" or "Revised" License](https://api.github.com/repos/oauthlib/oauthlib/license) |
+| OpenSearch | 1.2.4 | https://github.com/opensearch-project/OpenSearch | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
+| OpenSearch Dashboards | 1.2.0 | https://github.com/opensearch-project/OpenSearch-Dashboards | [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) |
| packaging | 20.9 | https://github.com/pypa/packaging | [Other](https://api.github.com/repos/pypa/packaging/license) |
| paramiko | 2.9.2 | https://paramiko.org | LGPL |
| pathlib2 | 2.3.6 | https://github.com/mcmtroffaes/pathlib2 | [MIT License](https://api.github.com/repos/mcmtroffaes/pathlib2/license) |
diff --git a/docs/home/HOWTO.md b/docs/home/HOWTO.md
index 64b83f94dc..41943431e0 100644
--- a/docs/home/HOWTO.md
+++ b/docs/home/HOWTO.md
@@ -34,8 +34,8 @@
- [How to configure scalable Prometheus setup](./howto/MONITORING.md#how-to-configure-scalable-prometheus-setup)
- [Import and create Grafana dashboards](./howto/MONITORING.md#import-and-create-grafana-dashboards)
- [How to setup default admin password and user in Grafana](./howto/MONITORING.md#how-to-setup-default-admin-password-and-user-in-grafana)
- - [How to configure Kibana - Open Distro](./howto/MONITORING.md#how-to-configure-kibana---open-distro)
- - [How to configure default user passwords for Kibana - Open Distro, Open Distro for Elasticsearch and Filebeat](./howto/MONITORING.md#how-to-configure-default-user-passwords-for-kibana---open-distro-open-distro-for-elasticsearch-and-filebeat)
+ - [How to configure OpenSearch Dashboards](./howto/MONITORING.md#how-to-configure-opensearch-dashboards)
+ - [How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat](./howto/MONITORING.md#how-to-configure-default-passwords-for-service-users-in-opensearch-dashboards-opensearch-and-filebeat)
- [How to configure scalable Prometheus setup](./howto/MONITORING.md#how-to-configure-scalable-prometheus-setup)
- [How to configure Azure additional monitoring and alerting](./howto/MONITORING.md#how-to-configure-azure-additional-monitoring-and-alerting)
- [How to configure AWS additional monitoring and alerting](./howto/MONITORING.md#how-to-configure-aws-additional-monitoring-and-alerting)
@@ -59,6 +59,7 @@
- [Run apply after upgrade](./howto/UPGRADE.md#run-apply-after-upgrade)
- [Kubernetes applications](./howto/UPGRADE.md#kubernetes-applications)
- [Kafka upgrade](./howto/UPGRADE.md#how-to-upgrade-kafka)
+ - [Migration from Open Distro for Elasticsearch to OpenSearch](./howto/UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards)
- [Open Distro for Elasticsearch upgrade](./howto/UPGRADE.md#open-distro-for-elasticsearch-upgrade)
- [Node exporter upgrade](./howto/UPGRADE.md#node-exporter-upgrade)
- [RabbitMQ upgrade](./howto/UPGRADE.md#rabbitmq-upgrade)
diff --git a/docs/home/RESOURCES.md b/docs/home/RESOURCES.md
index 03dac4c716..75adb34694 100644
--- a/docs/home/RESOURCES.md
+++ b/docs/home/RESOURCES.md
@@ -42,8 +42,8 @@ Here are some materials concerning Epiphany tooling and cluster components - bot
2. [RabbitMQ](https://www.rabbitmq.com/)
- [RabbitMQ Getting started](https://www.rabbitmq.com/getstarted.html)
5. Central logging
- 1. [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
- 2. [Kibana](https://www.elastic.co/guide/en/kibana/current/index.html)
+ 1. [OpenSearch Dashboards](https://opensearch.org/docs/latest/dashboards/index/)
+ 2. [OpenSearch](https://opensearch.org/docs/latest)
3. [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html)
- Beats platform reference(https://www.elastic.co/guide/en/beats/libbeat/current/index.html)
6. Load Balancing
diff --git a/docs/home/SECURITY.md b/docs/home/SECURITY.md
index e66633969e..df2fe12407 100644
--- a/docs/home/SECURITY.md
+++ b/docs/home/SECURITY.md
@@ -11,8 +11,12 @@ changes made in settings of your antivirus/antimalware solution.
## Contents
-- [Users and roles created by Epiphany](#users-and-roles-created-by-epiphany)
-- [Ports used by components in Epiphany](#ports-used-by-components-in-epiphany)
+- [Security related information](#security-related-information)
+ - [Contents](#contents)
+ - [Users and roles created by epiphany](#users-and-roles-created-by-epiphany)
+ - [Ports used by components in Epiphany](#ports-used-by-components-in-epiphany)
+ - [Connection protocols and ciphers used by components in Epiphany](#connection-protocols-and-ciphers-used-by-components-in-epiphany)
+ - [Notes](#notes)
### Users and roles created by epiphany
@@ -61,15 +65,15 @@ different values. The list does not include ports that are bound to the loopback
- 9093 - encrypted communication (if TLS/SSL is enabled)
- unconfigurable random port from ephemeral range - JMX (for local access only), see note [[1]](#notes)
-5. Elasticsearch:
+5. OpenSearch:
- - 9200 - Elasticsearch REST communication
- - 9300 - Elasticsearch nodes communication
+ - 9200 - OpenSearch REST communication
+ - 9300 - OpenSearch nodes communication
- 9600 - Performance Analyzer (REST API)
-6. Kibana:
+6. OpenSearch Dashboards:
- - 5601 - Kibana web UI
+ - 5601 - OpenSearch Dashboards web UI
7. Prometheus:
diff --git a/docs/home/howto/BACKUP.md b/docs/home/howto/BACKUP.md
index 45ee9378dc..14f84c7d06 100644
--- a/docs/home/howto/BACKUP.md
+++ b/docs/home/howto/BACKUP.md
@@ -125,11 +125,11 @@ Recovery includes all backed up files
Logging backup includes:
-- Elasticsearch database snapshot
-- Elasticsearch configuration ``/etc/elasticsearch/``
-- Kibana configuration ``/etc/kibana/``
+- OpenSearch database snapshot
+- OpenSearch configuration ``/usr/share/opensearch/config/``
+- OpenSearch Dashboards configuration ``/usr/share/opensearch-dashboards/config/``
-Only single-node Elasticsearch backup is supported. Solution for multi-node Elasticsearch cluster will be added in
+Only single-node OpenSearch backup is supported. A solution for multi-node OpenSearch clusters will be added in a
future release.
### Monitoring
diff --git a/docs/home/howto/CLUSTER.md b/docs/home/howto/CLUSTER.md
index 5bd2fd1f0d..dcd8a80a88 100644
--- a/docs/home/howto/CLUSTER.md
+++ b/docs/home/howto/CLUSTER.md
@@ -571,7 +571,7 @@ specification:
count: 0
rabbitmq:
count: 0
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
single_machine:
count: 1
@@ -753,7 +753,7 @@ Kubernetes master | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check
Kubernetes node | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check_mark: | [#1580](https://github.com/epiphany-platform/epiphany/issues/1580)
Kafka | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | ---
Load Balancer | :heavy_check_mark: | :heavy_check_mark: | :x: | :x: | ---
-Opendistro for elasticsearch | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | ---
+OpenSearch | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | ---
Postgresql | :x: | :x: | :heavy_check_mark: | :heavy_check_mark: | [#1577](https://github.com/epiphany-platform/epiphany/issues/1577)
RabbitMQ | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | [#1578](https://github.com/epiphany-platform/epiphany/issues/1578), [#1309](https://github.com/epiphany-platform/epiphany/issues/1309)
RabbitMQ K8s | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | [#1486](https://github.com/epiphany-platform/epiphany/issues/1486)
diff --git a/docs/home/howto/DATABASES.md b/docs/home/howto/DATABASES.md
index f07752058f..3b84bfb861 100644
--- a/docs/home/howto/DATABASES.md
+++ b/docs/home/howto/DATABASES.md
@@ -453,11 +453,10 @@ Properly configured application (kubernetes service) to use fully HA configurati
PostgreSQL native replication is now deprecated and removed.
Use [PostgreSQL HA replication with repmgr](#how-to-set-up-postgresql-ha-replication-with-repmgr-cluster) instead.
-## How to start working with OpenDistro for Elasticsearch
+## How to start working with OpenSearch
-OpenDistro for Elasticsearch
-is [an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL](https://opendistro.github.io/for-elasticsearch/).
-In order to start working with OpenDistro change machines count to value greater than 0 in your cluster configuration:
+OpenSearch is the [successor](https://opendistro.github.io/for-elasticsearch-docs/) of the Open Distro for Elasticsearch project. Epiphany provides an [automated solution](./UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards) for migrating your existing ODFE installation to OpenSearch.
+On the other hand, if you just plan to start working with OpenSearch, change the machines count to a value greater than 0 in your cluster configuration:
```yaml
kind: epiphany-cluster
@@ -473,22 +472,22 @@ specification:
...
logging:
count: 1
- opendistro_for_elasticsearch:
+ opensearch:
count: 2
```
-**Installation with more than one node will always be clustered** - Option to configure the non-clustered installation of more than one node for Open Distro is not supported.
+**Installation with more than one node will always be clustered** - configuring a non-clustered installation of more than one node is not supported for OpenSearch.
```yaml
-kind: configuration/opendistro-for-elasticsearch
-title: OpenDistro for Elasticsearch Config
+kind: configuration/opensearch
+title: OpenSearch Config
name: default
specification:
- cluster_name: EpiphanyElastic
+ cluster_name: EpiphanyOpenSearch
```
-By default, Kibana is deployed only for `logging` component. If you want to deploy Kibana
-for `opendistro_for_elasticsearch` you have to modify feature mapping. Use below configuration in your manifest.
+By default, OpenSearch Dashboards (previously the Kibana component) is deployed only for the `logging` component. If you want to deploy it
+for the `opensearch` component, you have to modify the feature mapping. Use the configuration below in your manifest:
```yaml
kind: configuration/feature-mapping
@@ -496,12 +495,11 @@ title: "Feature mapping to roles"
name: default
specification:
roles_mapping:
- opendistro_for_elasticsearch:
- - opendistro-for-elasticsearch
+  opensearch:
+    - opensearch
- node-exporter
- filebeat
- firewall
- - kibana
+ - opensearch-dashboards
```
-Filebeat running on `opendistro_for_elasticsearch` hosts will always point to centralized logging hosts (./LOGGING.md).
+Filebeat running on `opensearch` hosts will always point to the centralized logging hosts ([more info](./LOGGING.md)).
diff --git a/docs/home/howto/LOGGING.md b/docs/home/howto/LOGGING.md
index e419b2a543..1f7a711073 100644
--- a/docs/home/howto/LOGGING.md
+++ b/docs/home/howto/LOGGING.md
@@ -1,119 +1,114 @@
# Centralized logging setup
-For centralized logging Epiphany uses [OpenDistro for Elasticsearch](https://opendistro.github.io/for-elasticsearch/).
-In order to enable centralized logging, be sure that `count` property for `logging` feature is greater than 0 in your
+For centralized logging Epiphany uses the [OpenSearch](https://opensearch.org/) stack - an open source successor[1] of the Elasticsearch & Kibana projects.
+
+In order to enable centralized logging, be sure to set the `count` property of the `logging` feature to a value greater than 0 in your
configuration manifest.
```yaml
kind: epiphany-cluster
-...
+[...]
specification:
- ...
+ [...]
components:
kubernetes_master:
count: 1
kubernetes_node:
count: 0
- ...
+ [...]
logging:
- count: 1
- ...
+ count: 1 # <<------
+ [...]
```
## Default feature mapping for logging
+The example below shows the default feature mapping for logging:
```yaml
-...
-logging:
- - logging
- - kibana
- - node-exporter
- - filebeat
- - firewall
+[...]
+roles_mapping:
+[...]
+ logging:
+ - logging
+ - opensearch-dashboards
+ - node-exporter
+ - filebeat
+ - firewall
...
```
-The `logging` role replaced `elasticsearch` role. This change was done to enable Elasticsearch usage also for data
+The `logging` role has replaced the `elasticsearch` role. This change was done to enable Elasticsearch usage also for data
storage - not only for logs as it was till 0.5.0.
-Default configuration of `logging` and `opendistro_for_elasticsearch` roles is identical (
-./DATABASES.md#how-to-start-working-with-opendistro-for-elasticsearch). To modify configuration of centralized logging
-adjust and use the following defaults in your manifest:
+The default configuration of the `logging` and `opensearch` roles is identical (more info [here](./DATABASES.md#how-to-start-working-with-opensearch)). To modify the configuration of centralized logging,
+adjust the following default values in your manifest to your needs:
```yaml
+[...]
kind: configuration/logging
title: Logging Config
name: default
specification:
- cluster_name: EpiphanyElastic
+  cluster_name: EpiphanyOpenSearch
clustered: True
paths:
- data: /var/lib/elasticsearch
- repo: /var/lib/elasticsearch-snapshots
- logs: /var/log/elasticsearch
+ data: /var/lib/opensearch
+ repo: /var/lib/opensearch-snapshots
+ logs: /var/log/opensearch
```
-## How to manage Opendistro for Elasticsearch data
+## How to manage OpenSearch data
-Elasticsearch stores data using JSON documents, and an Index is a collection of documents. As in every database, it's
-crucial to correctly maintain data in this one. It's almost impossible to deliver database configuration which will fit
-to every type of project and data stored in. Epiphany deploys preconfigured Opendistro Elasticsearch, but this
-configuration may not meet user requirements. Before going to production, configuration should be tailored to the
-project needs. All configuration tips and tricks are available
-in [official documentation](https://opendistro.github.io/for-elasticsearch-docs/).
+OpenSearch stores data using JSON documents, and an index is a collection of documents. As in every database, it's crucial to maintain the data in it correctly. It's almost impossible to deliver a database configuration that fits every type of project and data stored in it. Epiphany deploys a preconfigured OpenSearch instance, but this configuration may not meet every user's requirements. That's why, before going to production, the stack configuration should be tailored to the project's needs. All configuration tips and tricks are available in the [official documentation](https://opensearch.org/docs/latest).
-The main and most important decisions to take before you deploy cluster are:
+The main and most important decisions to take before you deploy the cluster are:
-1) How many Nodes are needed
-2) How big machines and/or storage data disks need to be used
+- how many nodes are needed
+- how big the machines and/or storage data disks need to be
-These parameters are defined in yaml file, and it's important to create a big enough cluster.
+These parameters can be defined in the manifest yaml file. It is important to create a big enough cluster.
```yaml
specification:
+ [..]
components:
logging:
- count: 1 # Choose number of nodes
+ count: 1 # Choose number of nodes that suits your needs
+ machines:
+ - logging-machine-n
+ [..]
---
kind: infrastructure/virtual-machine
title: "Virtual Machine Infra"
-name: logging-machine
+name: logging-machine-n
specification:
- size: Standard_DS2_v2 # Choose machine size
+ size: Standard_DS2_v2 # Choose a VM size that suits your needs
```
-If it's required to have Elasticsearch which works in cluster formation configuration, except setting up more than one
-machine in yaml config file please acquaint dedicated
-support [article](https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/cluster/) and adjust
-Elasticsearch configuration file.
+If you need an OpenSearch instance that works in a cluster formation, then besides setting up more than one machine in the yaml config file, please get acquainted with the dedicated
+support [article](https://opensearch.org/docs/latest/troubleshoot/index/) and adjust the
+OpenSearch configuration file.
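+
+Once the cluster formation is configured, you can verify that the nodes have actually joined the cluster. The call below is only a sketch - the local host, port and admin credentials are assumptions to adjust to your environment:
+
+```sh
+# list the cluster nodes, assuming the security plugin is enabled with self-signed certificates
+curl -k -u admin:<admin_password> "https://localhost:9200/_cat/nodes?v"
+```
+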
-At this moment Opendistro for Elasticsearch does not support plugin similar
-to [ILM](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-lifecycle-management.html), log rotation
-is possible only by configuration created in Index State Management.
+We also strongly encourage you to get familiar with the plugins and policies available along with OpenSearch, among them the following:
-`ISM - Index State Management` - is a plugin that provides users and administrative panel to monitor the indices and
-apply policies at different index stages. ISM lets users automate periodic, administrative operations by triggering them
-based on index age, size, or number of documents. Using the ISM plugin, can define policies that automatically handle
-index rollovers or deletions. ISM is installed with Opendistro by default - user does not have to enable this. Official
-documentation is available
-in [Opendistro for Elasticsearch website](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/).
+`ISM - Index State Management` - is a plugin that provides users with an administrative panel to monitor indices and apply policies at different index stages. ISM lets users automate periodic, administrative operations by triggering them based on index age, size, or number of documents. Using the ISM plugin, you can define policies that automatically handle index rollovers or deletions. Official plugin documentation is available [here](https://opensearch.org/docs/latest/im-plugin/ism/index/).
To reduce the consumption of disk resources, every index you created should use
-well-designed [policy](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/).
+well-designed [policy](https://opensearch.org/docs/latest/im-plugin/ism/policies/).
Among others these two index actions might save machine from filling up disk space:
-[`Index Rollover`](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/#rollover) - rolls an alias
+[`Index Rollover`](https://opensearch.org/docs/latest/im-plugin/ism/policies/#rollover) - rolls an alias
to a new index. Set up correctly max index size / age or minimum number of documents to keep index size in requirements
framework.
-[`Index Deletion`](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/#delete) - deletes indexes
+[`Index Deletion`](https://opensearch.org/docs/latest/im-plugin/ism/policies/#delete) - deletes indexes
managed by policy
-Combining these actions, adapting them to data amount and specification users are able to create policy which will
-maintain data in cluster for example: to secure node from fulfilling disk space.
+Combining these actions and adapting them to the data amount and specification, users are able to create a policy which will
+maintain their data in the cluster, for example to protect a node from filling up its disk space.
-There is example of policy below. Be aware that this is only example, and it needs to be adjusted to environment needs.
+There is an example of such a policy below. Be aware that this is only an example and, like every example, it needs to be adjusted to the actual environment needs.
```json
{
@@ -181,64 +176,64 @@ There is example of policy below. Be aware that this is only example, and it nee
}
```
-Example above shows configuration with rollover daily or when index achieve 1 GB size. Indexes older than 14 days will
+The example above shows a configuration with a daily index rollover or a rollover when the index reaches 1 GB in size. Indexes older than 14 days will
be deleted. States and conditionals could be combined. Please
-see [policies](https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/policies/) documentation for more
+see [policies](https://opensearch.org/docs/latest/im-plugin/ism/policies/) documentation for more
details.
-`Apply Policy`
+#### Apply Policy
-To apply policy use similar API request as presented below:
+To apply a policy you can use an API request similar to the one presented below:
-```
-PUT _template/template_01
+```sh
+PUT _index_template/ism_rollover
```
```json
{
"index_patterns": ["filebeat*"],
"settings": {
- "opendistro.index_state_management.rollover_alias": "filebeat"
- "opendistro.index_state_management.policy_id": "epi_policy"
+      "plugins.index_state_management.rollover_alias": "filebeat",
+ "plugins.index_state_management.policy_id": "epi_policy"
}
}
```
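+
+For reference, the same request can be sent with `curl` from one of the OpenSearch hosts. This is only a sketch - the host, the admin credentials and the `ism_rollover.json` file holding the JSON body shown above are assumptions to adjust to your environment:
+
+```sh
+# apply the index template, assuming the security plugin is enabled with self-signed certificates
+# and the JSON body above has been saved locally as ism_rollover.json
+curl -k -u admin:<admin_password> \
+  -X PUT "https://localhost:9200/_index_template/ism_rollover" \
+  -H 'Content-Type: application/json' \
+  -d @ism_rollover.json
+```
+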
After applying this policy, every new index created under this one will apply to it. There is also possibility to apply
-policy to already existing policies by assigning them to policy in Index Management Kibana panel.
+a policy to already existing indices by assigning them to the policy in the Index Management panel of the dashboard.
-## How to export Kibana reports to CSV format
+## How to export Dashboards reports
-Since v1.0 Epiphany provides the possibility to export reports from Kibana to CSV, PNG or PDF using the Open Distro for
-Elasticsearch Kibana reports feature.
+Since v1.0 Epiphany provides the possibility to export reports from Kibana to CSV, PNG or PDF using the Open Distro for Elasticsearch Kibana reports feature. After migrating from the Elastic stack to the OpenSearch stack, you can make use of the OpenSearch Reporting feature to achieve this and more.
-Check more details about the plugin and how to export reports in the
-[documentation](https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/reporting)
+Check more details about the OpenSearch Reports plugin and how to export reports in the
+[documentation](https://github.com/opensearch-project/dashboards-reports/blob/main/README.md#opensearch-dashboards-reports).
-`Note: Currently in Open Distro for Elasticsearch Kibana the following plugins are installed and enabled by default: security, alerting, anomaly detection, index management, query workbench, notebooks, reports, alerting, gantt chart plugins.`
+Notice: Currently in the OpenSearch stack the following plugins are installed and enabled by default: security, alerting, anomaly detection, index management, query workbench, notebooks, reports and gantt chart.
-You can easily check enabled default plugins for Kibana using the following command on the logging machine:
-`./bin/kibana-plugin list` in Kibana directory.
+You can easily check the enabled default plugins for the Dashboards component by running the following command on the logging machine:
+`./bin/opensearch-dashboards-plugin list` in the directory where you've installed _opensearch-dashboards_.
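+
+For example, a quick check could look like the snippet below. The installation directory is an assumption based on the default Epiphany layout - adjust the path if your setup differs:
+
+```sh
+cd /usr/share/opensearch-dashboards
+./bin/opensearch-dashboards-plugin list
+```
+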
---
## How to add multiline support for Filebeat logs
-In order to properly handle multilines in files harvested by Filebeat you have to provide `multiline` definition in the
-configuration manifest. Using the following code you will be able to specify which lines are part of a single event.
+In order to properly handle multiline outputs in files harvested by Filebeat you have to provide a `multiline` definition in the cluster configuration manifest. Using the following code you will be able to specify which lines are part of a single event.
By default, postgresql block is provided, you can use it as example:
```yaml
+[..]
postgresql_input:
multiline:
pattern: >-
'^\d{4}-\d{2}-\d{2} '
negate: true
match: after
+[..]
```
-Supported inputs: `common_input`,`postgresql_input`,`container_input`
+Supported inputs: `common_input`, `postgresql_input`, `container_input`.
More details about multiline options you can find in
the [official documentation](https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html)
@@ -253,19 +248,29 @@ specification:
k8s_as_cloud_service: true
```
-## How to use default Kibana dashboards
+## How to use default OpenSearch dashboards
+
+---
+This feature is not working in the current version of OpenSearch, so `setup.dashboards.enabled` is set to _false_ as a workaround.
+---
It is possible to configure `setup.dashboards.enabled` and `setup.dashboards.index` Filebeat settings using `specification.kibana.dashboards` key in `configuration/filebeat` doc.
-When `specification.kibana.dashboards.enabled` is set to `auto`, the corresponding setting in Filebeat configuration file will be set to `true` only if Kibana is configured to be present on the host.
+When `specification.kibana.dashboards.enabled` is set to `auto`, the corresponding setting in the Filebeat configuration file will be set to `true` only if the OpenSearch Dashboards component is configured to be present on the host.
Other possible values are `true` and `false`.
Default configuration:
-```
+```yaml
specification:
- kibana:
+[..]
+ opensearch:
dashboards:
enabled: auto
index: filebeat-*
```
-Note: Setting `specification.kibana.dashboards.enabled` to `true` not providing Kibana will result in a Filebeat crash.
+Notice: Setting `specification.kibana.dashboards.enabled` to `true` without providing OpenSearch Dashboards will result in a Filebeat crash.
+
+
+
+---
+[1] More information about migrating from Elasticsearch & Kibana to OpenSearch & OpenSearch Dashboards can be found [here](./UPGRADE.md#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards).
diff --git a/docs/home/howto/MAINTENANCE.md b/docs/home/howto/MAINTENANCE.md
index 52cc3de205..f42ead43c6 100644
--- a/docs/home/howto/MAINTENANCE.md
+++ b/docs/home/howto/MAINTENANCE.md
@@ -121,12 +121,12 @@ To check status of Node Exporter, use the command:
status prometheus-node-exporter
```
-#### - Elasticsearch
+#### - OpenSearch
-To check status of Elasticsearch, use the command:
+To check the status of OpenSearch, use the command:
```shell
-systemct status elasticsearch
+systemctl status opensearch
```
Check if service is listening on 9200 (API communication port):
@@ -141,7 +141,7 @@ Check if service is listening on 9300 (nodes communication port):
netstat -antup | grep 9300
```
-Check status of Elasticsearch cluster:
+We can also check the status of the OpenSearch cluster:
```shell
:9200/_cluster/health
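+
+# for example, with the security plugin enabled (the self-signed certificates and the
+# default admin user below are assumptions - adjust them to your environment):
+curl -k -u admin:<admin_password> "https://localhost:9200/_cluster/health?pretty"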
diff --git a/docs/home/howto/MONITORING.md b/docs/home/howto/MONITORING.md
index 3f2917c2d0..4516657a35 100644
--- a/docs/home/howto/MONITORING.md
+++ b/docs/home/howto/MONITORING.md
@@ -11,10 +11,10 @@ Grafana:
- [How to setup default admin password and user in Grafana](#how-to-setup-default-admin-password-and-user-in-grafana)
- [Import and create Grafana dashboards](#import-and-create-grafana-dashboards)
-Kibana:
+OpenSearch Dashboards:
-- [How to configure Kibana](#how-to-configure-kibana)
-- [How to configure default user password in Kibana](#how-to-configure-default-user-password-in-kibana)
+- [How to configure OpenSearch Dashboards](#how-to-configure-opensearch-dashboards)
+- [How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat](#how-to-configure-default-passwords-for-service-users-in-opensearch-dashboards-opensearch-and-filebeat)
RabbitMQ:
@@ -231,50 +231,47 @@ When dashboard creation or import succeeds you will see it on your dashboard lis
*Note: For some dashboards, there is no data to visualize until there is traffic activity for the monitored component.*
-# Kibana
+# OpenSearch Dashboards
-Kibana is an free and open frontend application that sits on top of the Elastic Stack, providing search and data visualization capabilities for data indexed in Elasticsearch. For more informations about Kibana please refer to [the official website](https://www.elastic.co/what-is/kibana).
+OpenSearch Dashboards (a Kibana counterpart) is an open source search and analytics visualization layer. It also serves as a user interface for many OpenSearch project plugins. For more information please refer to [the official website](https://opensearch.org/docs/latest/dashboards/index/).
-## How to configure Kibana - Open Distro
+## How to configure OpenSearch Dashboards
-In order to start viewing and analyzing logs with Kibana, you first need to add an index pattern for Filebeat according to the following steps:
+In order to start viewing and analyzing logs with the Dashboards tool, you first need to add an index pattern for Filebeat according to the following procedure:
-1. Goto the `Management` tab
-2. Select `Index Patterns`
-3. On the first step define as index pattern:
+1. Go to the `Stack Management` tab
+2. Select `Index Patterns` --> `Create index pattern`
+3. Define an index pattern:
`filebeat-*`
- Click next.
+ and click next.
4. Configure the time filter field if desired by selecting `@timestamp`. This field represents the time that events occurred or were processed. You can choose not to have a time field, but you will not be able to narrow down your data by a time range.
-This filter pattern can now be used to query the Elasticsearch indices.
+This filter pattern can now be used to query the OpenSearch indices.
-By default Kibana adjusts the UTC time in `@timestamp` to the browser's local timezone. This can be changed in `Management` > `Advanced Settings` > `Timezone for date formatting`.
+By default OpenSearch Dashboards adjusts the UTC time in `@timestamp` to the browser's local timezone. This can be changed in `Stack Management` > `Advanced Settings` > `Timezone for date formatting`.
-## How to configure default user passwords for Kibana - Open Distro, Open Distro for Elasticsearch and Filebeat
+## How to configure default passwords for service users in OpenSearch Dashboards, OpenSearch and Filebeat
-To configure admin password for Kibana - Open Distro and Open Distro for Elasticsearch you need to follow the procedure below.
-There are separate procedures for `logging` and `opendistro-for-elasticsearch` roles since most of the times for `opendistro-for-elasticsearch`, `kibanaserver` and `logstash` users are not required to be present.
+To configure the admin password for OpenSearch Dashboards (previously Kibana) and OpenSearch you need to follow the procedure below.
+There are separate procedures for the `logging` and `opensearch` roles since, for the `opensearch` role, the `kibanaserver` and `logstash` users are usually not required to be present.
### Logging component
-#### - Logging role
+#### Logging role
-By default Epiphany removes users that are listed in `demo_users_to_remove` section of `configuration/logging` doc.
-By default, `kibanaserver` user (needed by default Epiphany installation of Kibana) and `logstash` (needed by default Epiphany
-installation of Filebeat) are not removed. If you want to perform configuration by Epiphany, set `kibanaserver_user_active` to `true`
-for `kibanaserver` user or `logstash_user_active` for `logstash` user. For `logging` role, those settings are already set to `true` by default.
+By default Epiphany removes the users that are listed in the `demo_users_to_remove` section of the `configuration/logging` manifest document.
+Additionally, the `kibanaserver`[1] user (needed by the default Epiphany installation of Dashboards) and the `logstash` user (needed by the default Epiphany installation of Filebeat) are not removed. If you want Epiphany to perform this configuration, set `kibanaserver_user_active` to `true`
+for the `kibanaserver` user and/or `logstash_user_active` to `true` for the `logstash` user. For the `logging` role, those settings are already set to `true` by default.
We strongly advice to set different password for each user.
-To change `admin` user's password, change value for `admin_password` key. For `kibanaserver` and `logstash`, change values
-for `kibanaserver_password` and `logstash_password` keys respectively. Changes from logging role will be propagated to Kibana
-and Filebeat configuration.
+To change the `admin` user's password, you need to change the value of the `admin_password` key (see the example below). For `kibanaserver` and `logstash`, you need to change the values of the `kibanaserver_password` and `logstash_password` keys respectively. Changes from the logging role will be propagated to the OpenSearch Dashboards and Filebeat configuration accordingly.
```yaml
kind: configuration/logging
title: Logging Config
name: default
specification:
- ...
+ [...]
admin_password: YOUR_PASSWORD
kibanaserver_password: YOUR_PASSWORD
kibanaserver_user_active: true
@@ -286,32 +283,27 @@ specification:
- snapshotrestore
```
-#### - Kibana role
+#### OpenSearch Dashboards (Kibana) role
-To set password of `kibanaserver` user, which is used by Kibana for communication with Open Distro Elasticsearch backend follow the procedure
-described in [Logging role](#-logging-role).
+To set the password for the `kibanaserver` user, which is used by Dashboards for communication with the OpenSearch backend, follow the procedure described in [Logging role](#logging-role).
-#### - Filebeat role
+#### Filebeat role
-To set password of `logstash` user, which is used by Filebeat for communication with Open Distro Elasticsearch backend follow the procedure described
-in [Logging role](#-logging-role).
+To set the password of the `logstash` user, which is used by Filebeat for communication with the OpenSearch backend, follow the procedure described in [Logging role](#logging-role).
-### Open Distro for Elasticsearch component
+### OpenSearch component
-By default Epiphany removes all demo users except `admin` user. Those users are listed in `demo_users_to_remove` section
-of `configuration/opendistro-for-elasticsearch` doc. If you want to keep `kibanaserver` user (needed by default Epiphany installation of Kibana),
-you need to remove it from `demo_users_to_remove` list and set `kibanaserver_user_active` to `true` in order to change the default password.
+By default Epiphany removes all demo users except the `admin` user. Those users are listed in the `demo_users_to_remove` section of the `configuration/opensearch` manifest doc (see the example below). If you want to keep the `kibanaserver` user (needed by the default Epiphany installation of OpenSearch Dashboards), you need to exclude it from the `demo_users_to_remove` list and set `kibanaserver_user_active` to `true` in order to change the default password.
We strongly advice to set different password for each user.
-To change `admin` user's password, change value for `admin_password` key. For `kibanaserver` and `logstash`, change values for `kibanaserver_password`
-and `logstash_password` keys respectively.
+To change the `admin` user's password, change the value of the `admin_password` key. For `kibanaserver` and `logstash`, change the values of the `kibanaserver_password` and `logstash_password` keys respectively.
```yaml
-kind: configuration/opendistro-for-elasticsearch
-title: Open Distro for Elasticsearch Config
+kind: configuration/opensearch
+title: OpenSearch Config
name: default
specification:
- ...
+ [...]
admin_password: YOUR_PASSWORD
kibanaserver_password: YOUR_PASSWORD
kibanaserver_user_active: false
@@ -325,9 +317,13 @@ specification:
- kibanaserver
```
-### Upgrade of Elasticsearch, Kibana and Filebeat
+### Upgrade of OpenSearch, OpenSearch Dashboards and Filebeat
+
+Keep in mind that during the upgrade process Epiphany takes the `kibanaserver` (for Dashboards) and `logstash` (for Filebeat) user passwords and re-applies them to the upgraded configuration of Filebeat and OpenSearch Dashboards. So if these passwords differ from what was set up before the upgrade, take these changes into account at the next login.
+
+The Epiphany upgrade of the OpenSearch, OpenSearch Dashboards or Filebeat components will fail if the `kibanaserver` or `logstash` usernames were changed in the configuration of OpenSearch, OpenSearch Dashboards or Filebeat beforehand.
-During upgrade Epiphany takes `kibanaserver` (for Kibana) and `logstash` (for Filebeat) user passwords and re-applies them to upgraded configuration of Filebeat and Kibana. Epiphany upgrade of Open Distro, Kibana or Filebeat will fail if `kibanaserver` or `logstash` usernames were changed in configuration of Kibana, Filebeat or Open Distro for Elasticsearch.
+[1] For backward compatibility, some naming conventions (i.e. the kibanaserver user name) are still present within the new (OpenSearch) platform, though they will be phased out in the future. As a result, the Epiphany stack also still uses these names.
# HAProxy
diff --git a/docs/home/howto/RETENTION.md b/docs/home/howto/RETENTION.md
index 6ae5b8d87f..753b84ab42 100644
--- a/docs/home/howto/RETENTION.md
+++ b/docs/home/howto/RETENTION.md
@@ -1,7 +1,7 @@
An Epiphany cluster has a number of components which log, collect and retain data. To make sure that these do not exceed
the usable storage of the machines they running on, the following configurations are available.
-## Elasticsearch
+## OpenSearch
TODO
diff --git a/docs/home/howto/SECURITY_GROUPS.md b/docs/home/howto/SECURITY_GROUPS.md
index d9f84a09f3..2e0d0f6694 100644
--- a/docs/home/howto/SECURITY_GROUPS.md
+++ b/docs/home/howto/SECURITY_GROUPS.md
@@ -278,7 +278,7 @@ specification:
count: 0
rabbitmq:
count: 0
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
single_machine:
count: 0
diff --git a/docs/home/howto/UPGRADE.md b/docs/home/howto/UPGRADE.md
index 676c33f9ff..49db4473f5 100644
--- a/docs/home/howto/UPGRADE.md
+++ b/docs/home/howto/UPGRADE.md
@@ -1,5 +1,45 @@
# Upgrade
+- [Upgrade](#upgrade)
+ - [Introduction](#introduction)
+ - [Online upgrade](#online-upgrade)
+ - [Online prerequisites](#online-prerequisites)
+ - [Start the online upgrade](#start-the-online-upgrade)
+ - [Offline upgrade](#offline-upgrade)
+ - [Offline prerequisites](#offline-prerequisites)
+ - [Start the offline upgrade](#start-the-offline-upgrade)
+ - [Additional parameters](#additional-parameters)
+ - [Run *apply* after *upgrade*](#run-apply-after-upgrade)
+ - [Kubernetes applications](#kubernetes-applications)
+ - [How to upgrade Kafka](#how-to-upgrade-kafka)
+ - [Kafka upgrade](#kafka-upgrade)
+ - [ZooKeeper upgrade](#zookeeper-upgrade)
+ - [Migration from Open Distro for Elasticsearch & Kibana to OpenSearch and OpenSearch Dashboards](#migration-from-open-distro-for-elasticsearch--kibana-to-opensearch-and-opensearch-dashboards)
+ - [Open Distro for Elasticsearch upgrade](#open-distro-for-elasticsearch-upgrade)
+ - [Node exporter upgrade](#node-exporter-upgrade)
+ - [RabbitMQ upgrade](#rabbitmq-upgrade)
+ - [Kubernetes upgrade](#kubernetes-upgrade)
+ - [Prerequisites](#prerequisites)
+ - [PostgreSQL upgrade](#postgresql-upgrade)
+ - [Versions](#versions)
+ - [Prerequisites](#prerequisites-1)
+ - [Upgrade](#upgrade-1)
+ - [Manual actions](#manual-actions)
+ - [Post-upgrade processing](#post-upgrade-processing)
+ - [Statistics](#statistics)
+ - [Delete old cluster](#delete-old-cluster)
+ - [Terraform upgrade from Epiphany 1.x to 2.x](#terraform-upgrade-from-epiphany-1x-to-2x)
+ - [Azure](#azure)
+ - [v0.12.6 => v0.13.x](#v0126--v013x)
+ - [v0.13.x => v0.14.x](#v013x--v014x)
+ - [v0.14.x => v1.0.x](#v014x--v10x)
+ - [v1.0.x => v1.1.3](#v10x--v113)
+ - [AWS](#aws)
+ - [v0.12.6 => v0.13.x](#v0126--v013x-1)
+ - [v0.13.x => v0.14.x](#v013x--v014x-1)
+ - [v0.14.x => v1.0.x](#v014x--v10x-1)
+ - [v1.0.x => v1.1.3](#v10x--v113-1)
+
## Introduction
From Epicli 0.4.2 and up the CLI has the ability to perform upgrades on certain components on a cluster. The components
@@ -51,10 +91,10 @@ Your airgapped existing cluster should meet the following requirements:
3. The cluster machines/vm`s should be accessible through SSH with a set of SSH keys you provided and configured on each
machine yourself.
4. A provisioning machine that:
- - Has access to the SSH keys
- - Has access to the build output from when the cluster was first created.
- - Is on the same network as your cluster machines
- - Has Epicli 0.4.2 or up running.
+- Has access to the SSH keys
+- Has access to the build output from when the cluster was first created.
+- Is on the same network as your cluster machines
+- Has Epicli 0.4.2 or up running.
*Note. To run Epicli check the [Prerequisites](./PREREQUISITES.md)*
### Start the online upgrade
@@ -86,10 +126,10 @@ Your airgapped existing cluster should meet the following requirements:
- Runs the same distribution as the airgapped cluster machines/vm`s (AlmaLinux 8, RedHat 8, Ubuntu 20.04)
- Has access to the internet.
5. A provisioning machine that:
- - Has access to the SSH keys
- - Has access to the build output from when the cluster was first created.
- - Is on the same network as your cluster machines
- - Has Epicli 0.4.2 or up running.
+- Has access to the SSH keys
+- Has access to the build output from when the cluster was first created.
+- Is on the same network as your cluster machines
+- Has Epicli 0.4.2 or up running.
---
**NOTE**
@@ -200,7 +240,7 @@ specification:
count: 1
rabbitmq:
count: 0
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
name: clustername
prefix: 'prefix'
@@ -260,18 +300,30 @@ then start with the rest **one by one**.
More detailed information about ZooKeeper you can find
in [ZooKeeper documentation](https://cwiki.apache.org/confluence/display/ZOOKEEPER).
-## Open Distro for Elasticsearch upgrade
+## Migration from Open Distro for Elasticsearch & Kibana to OpenSearch and OpenSearch Dashboards
---
**NOTE**
-Before upgrade procedure make sure you have a data backup!
+Make sure you have a backup before proceeding to the migration steps described below!
---
+Following the decision of Elastic NV[1] to cease the open source options available for Elasticsearch and Kibana and to release them under the Elastic license (more info [here](https://github.com/epiphany-platform/epiphany/issues/2870)), the Epiphany team decided to implement a mechanism for automatic migration from Elasticsearch 7.10.2 to OpenSearch 1.2.4.
+
+It is important to remember that while the new platform makes an effort to continue supporting a broad set of third party tools (i.e. Beats), there can be some drawbacks or even malfunctions, as not everything has been tested or explicitly added to the OpenSearch compatibility scope[2].
+Additionally, some of the components (i.e. Elasticsearch Curator) or some embedded service accounts (i.e. *kibanaserver*) can still be found in the OpenSearch environment, but they will be phased out.
+
+Keep in mind that for the current version of OpenSearch and OpenSearch Dashboards it is necessary to include the `filebeat` component along with the `logging` one in order to implement the workaround for the *Kibana API not available* [bug](https://github.com/opensearch-project/OpenSearch-Dashboards/issues/656#issuecomment-978036236).
+
+Upgrade of ESS/ODFE versions not shipped with previous Epiphany releases is not supported. If your environment is customized, it needs to be standardized (as described in [this](https://opensearch.org/docs/latest/upgrade-to/upgrade-to/#upgrade-paths) table) prior to running the migration.
+
+Migration of Elasticsearch Curator is not supported. More info on the use of Curator in an OpenSearch environment can be found [here](https://github.com/opensearch-project/OpenSearch/issues/1352).
+
+[1] https://www.elastic.co/pricing/faq/licensing#what-are-the-key-changes-being-made-to-the-elastic-license
+
+[2] https://opensearch.org/docs/latest/clients/agents-and-ingestion-tools/index/
-Since Epiphany v1.0.0 we provide upgrade elasticsearch-oss package to v7.10.2 and opendistro-\* plugins package to
-v1.13.\*. Upgrade will be performed automatically when the upgrade procedure detects your `logging`
-, `opendistro_for_elasticsearch` or `kibana` hosts.
+The upgrade will be performed automatically when the upgrade procedure detects your `logging`, `opensearch` or `opensearch_dashboards` hosts.
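+
+As a sketch, the upgrade can be limited to the logging stack with the component filter. The build folder path below is a placeholder and the exact flag syntax may differ between Epicli versions (check `epicli upgrade --help`):
+
+```sh
+epicli upgrade -b /path/to/build/folder --upgrade-components "logging,opensearch,opensearch_dashboards"
+```
+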
Upgrade of Elasticsearch uses API calls (GET, PUT, POST) which requires an admin TLS certificate. By default, Epiphany
generates self-signed certificates for this purpose but if you use your own, you have to provide the admin certificate's
@@ -284,7 +336,7 @@ logging:
cert_path: /etc/elasticsearch/custom-admin.pem
key_path: /etc/elasticsearch/custom-admin-key.pem
-opendistro_for_elasticsearch:
+opensearch:
upgrade_config:
custom_admin_certificate:
cert_path: /etc/elasticsearch/custom-admin.pem
diff --git a/schema/any/defaults/configuration/minimal-cluster-config.yml b/schema/any/defaults/configuration/minimal-cluster-config.yml
index 02a57099b1..c1210abf61 100644
--- a/schema/any/defaults/configuration/minimal-cluster-config.yml
+++ b/schema/any/defaults/configuration/minimal-cluster-config.yml
@@ -46,10 +46,10 @@ specification:
count: 1
machines:
- default-rabbitmq
- opendistro_for_elasticsearch:
+ opensearch:
count: 1
machines:
- - default-opendistro
+ - default-opensearch
---
kind: infrastructure/machine
provider: any
@@ -130,7 +130,7 @@ specification:
---
kind: infrastructure/machine
provider: any
-name: default-opendistro
+name: default-opensearch
specification:
- hostname: opendistro # YOUR-MACHINE-HOSTNAME
+ hostname: opensearch # YOUR-MACHINE-HOSTNAME
ip: 192.168.100.112 # YOUR-MACHINE-IP
diff --git a/schema/any/defaults/epiphany-cluster.yml b/schema/any/defaults/epiphany-cluster.yml
index 27a3014ac2..43218bcee7 100644
--- a/schema/any/defaults/epiphany-cluster.yml
+++ b/schema/any/defaults/epiphany-cluster.yml
@@ -41,7 +41,7 @@ specification:
count: 0
machines: []
configuration: default
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
machines: []
configuration: default
diff --git a/schema/aws/defaults/configuration/minimal-cluster-config.yml b/schema/aws/defaults/configuration/minimal-cluster-config.yml
index 629e1b5675..f7f232f825 100644
--- a/schema/aws/defaults/configuration/minimal-cluster-config.yml
+++ b/schema/aws/defaults/configuration/minimal-cluster-config.yml
@@ -34,5 +34,5 @@ specification:
count: 1
rabbitmq:
count: 1
- opendistro_for_elasticsearch:
+ opensearch:
count: 1
diff --git a/schema/aws/defaults/epiphany-cluster.yml b/schema/aws/defaults/epiphany-cluster.yml
index f50a21cb6d..336a1c9ec3 100644
--- a/schema/aws/defaults/epiphany-cluster.yml
+++ b/schema/aws/defaults/epiphany-cluster.yml
@@ -76,7 +76,7 @@ specification:
subnets:
- address_pool: 10.1.8.0/24
availability_zone: eu-west-2a
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
machine: logging-machine
configuration: default
diff --git a/schema/aws/defaults/infrastructure/virtual-machine.yml b/schema/aws/defaults/infrastructure/virtual-machine.yml
index 7e27b4ebfa..d61c68ba14 100644
--- a/schema/aws/defaults/infrastructure/virtual-machine.yml
+++ b/schema/aws/defaults/infrastructure/virtual-machine.yml
@@ -364,8 +364,8 @@ specification:
destination_port_range: "9300"
source_address_prefix: "10.1.0.0/20"
destination_address_prefix: "0.0.0.0/0"
- - name: Kibana
- description: Allow Kibana
+ - name: OpenSearchDashboards
+ description: Allow OpenSearch Dashboards
direction: Inbound
protocol: "Tcp"
destination_port_range: "5601"
diff --git a/schema/azure/defaults/configuration/minimal-cluster-config.yml b/schema/azure/defaults/configuration/minimal-cluster-config.yml
index ecb4d2b695..2d2784511a 100644
--- a/schema/azure/defaults/configuration/minimal-cluster-config.yml
+++ b/schema/azure/defaults/configuration/minimal-cluster-config.yml
@@ -32,5 +32,5 @@ specification:
count: 1
rabbitmq:
count: 1
- opendistro_for_elasticsearch:
+ opensearch:
count: 1
diff --git a/schema/azure/defaults/epiphany-cluster.yml b/schema/azure/defaults/epiphany-cluster.yml
index 6e5026e9a7..66d0dd318f 100644
--- a/schema/azure/defaults/epiphany-cluster.yml
+++ b/schema/azure/defaults/epiphany-cluster.yml
@@ -76,7 +76,7 @@ specification:
configuration: default
subnets:
- address_pool: 10.1.8.0/24
- opendistro_for_elasticsearch:
+ opensearch:
count: 0
alt_component_name: ''
machine: logging-machine
diff --git a/schema/azure/defaults/infrastructure/virtual-machine.yml b/schema/azure/defaults/infrastructure/virtual-machine.yml
index cd5f43129e..b0a14ec2dc 100644
--- a/schema/azure/defaults/infrastructure/virtual-machine.yml
+++ b/schema/azure/defaults/infrastructure/virtual-machine.yml
@@ -389,8 +389,8 @@ specification:
destination_port_range: "9300"
source_address_prefix: "10.1.0.0/20"
destination_address_prefix: "0.0.0.0/0"
- - name: Kibana
- description: Allow Kibana
+ - name: OpenSearchDashboards
+ description: Allow OpenSearch Dashboards
priority: 203
direction: Inbound
access: Allow
diff --git a/schema/common/defaults/configuration/feature-mapping.yml b/schema/common/defaults/configuration/feature-mapping.yml
index 72d4ad09b0..9d6db5dc01 100644
--- a/schema/common/defaults/configuration/feature-mapping.yml
+++ b/schema/common/defaults/configuration/feature-mapping.yml
@@ -17,11 +17,11 @@ specification:
enabled: true
- name: logging
enabled: true
- - name: opendistro-for-elasticsearch
+ - name: opensearch
enabled: true
- name: elasticsearch-curator
enabled: true
- - name: kibana
+ - name: opensearch-dashboards
enabled: true
- name: filebeat
enabled: true
@@ -68,7 +68,7 @@ specification:
- firewall
logging:
- logging
- - kibana
+ - opensearch-dashboards
- node-exporter
- filebeat
- firewall
@@ -126,8 +126,8 @@ specification:
- node-exporter
- filebeat
- firewall
- opendistro_for_elasticsearch:
- - opendistro-for-elasticsearch
+ opensearch:
+ - opensearch
- node-exporter
- filebeat
- firewall
diff --git a/schema/common/defaults/configuration/filebeat.yml b/schema/common/defaults/configuration/filebeat.yml
index 22f77ff9a8..0aeb0a3220 100644
--- a/schema/common/defaults/configuration/filebeat.yml
+++ b/schema/common/defaults/configuration/filebeat.yml
@@ -2,7 +2,7 @@ kind: configuration/filebeat
title: Filebeat
name: default
specification:
- kibana:
+ opensearch:
dashboards:
index: filebeat-*
enabled: auto
diff --git a/schema/common/defaults/configuration/firewall.yml b/schema/common/defaults/configuration/firewall.yml
index 8a9d66493c..4c5c3e2c7a 100644
--- a/schema/common/defaults/configuration/firewall.yml
+++ b/schema/common/defaults/configuration/firewall.yml
@@ -45,7 +45,7 @@ specification:
enabled: true
ports:
- 9308/tcp
- kibana:
+ opensearch_dashboards:
enabled: true
ports:
- 5601/tcp
@@ -71,7 +71,7 @@ specification:
enabled: true
ports:
- 9100/tcp
- opendistro_for_elasticsearch:
+ opensearch:
enabled: true
ports:
- 9200/tcp
diff --git a/schema/common/defaults/configuration/kibana.yml b/schema/common/defaults/configuration/kibana.yml
deleted file mode 100644
index bea9fbb13b..0000000000
--- a/schema/common/defaults/configuration/kibana.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-kind: configuration/kibana
-title: "Kibana"
-name: default
-specification:
- kibana_log_dir: /var/log/kibana
diff --git a/schema/common/defaults/configuration/logging.yml b/schema/common/defaults/configuration/logging.yml
index be687c2e65..cbbd2c3be2 100644
--- a/schema/common/defaults/configuration/logging.yml
+++ b/schema/common/defaults/configuration/logging.yml
@@ -2,23 +2,30 @@ kind: configuration/logging
title: Logging Config
name: default
specification:
- cluster_name: EpiphanyElastic
+ cluster_name: EpiphanyOpenSearch
+ opensearch_os_user: opensearch
+ opensearch_os_group: opensearch
admin_password: PASSWORD_TO_CHANGE
kibanaserver_password: PASSWORD_TO_CHANGE
kibanaserver_user_active: true
logstash_password: PASSWORD_TO_CHANGE
logstash_user_active: true
demo_users_to_remove:
- - kibanaro
- - readall
- - snapshotrestore
+ - kibanaro
+ - readall
+ - snapshotrestore
+ # - logstash
+ # - kibanaserver
paths:
- data: /var/lib/elasticsearch
- repo: /var/lib/elasticsearch-snapshots
- logs: /var/log/elasticsearch
+ opensearch_home: /usr/share/opensearch
+ opensearch_conf_dir: /usr/share/opensearch/config
+ opensearch_log_dir: /var/log/opensearch
+ opensearch_snapshots_dir: /var/lib/opensearch-snapshots
+ opensearch_data_dir: /var/lib/opensearch
+ opensearch_perftop_dir: /usr/share/opensearch/perftop
jvm_options:
- Xmx: 1g # see https://www.elastic.co/guide/en/elasticsearch/reference/7.9/heap-size.html
- opendistro_security:
+ Xmx: 1g
+ opensearch_security:
ssl:
transport:
enforce_hostname_verification: true
diff --git a/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml b/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml
deleted file mode 100644
index 9f3979d722..0000000000
--- a/schema/common/defaults/configuration/opendistro-for-elasticsearch.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-kind: configuration/opendistro-for-elasticsearch
-title: Open Distro for Elasticsearch Config
-name: default
-specification:
- cluster_name: EpiphanyElastic
- clustered: true
- admin_password: PASSWORD_TO_CHANGE
- kibanaserver_password: PASSWORD_TO_CHANGE
- kibanaserver_user_active: false
- logstash_password: PASSWORD_TO_CHANGE
- logstash_user_active: false
- demo_users_to_remove:
- - kibanaro
- - readall
- - snapshotrestore
- - logstash
- - kibanaserver
- paths:
- data: /var/lib/elasticsearch
- repo: /var/lib/elasticsearch-snapshots
- logs: /var/log/elasticsearch
- jvm_options:
- Xmx: 1g # see https://www.elastic.co/guide/en/elasticsearch/reference/7.9/heap-size.html
- opendistro_security:
- ssl:
- transport:
- enforce_hostname_verification: true
diff --git a/schema/common/defaults/configuration/opensearch-dashboards.yml b/schema/common/defaults/configuration/opensearch-dashboards.yml
new file mode 100644
index 0000000000..5ca8dab00e
--- /dev/null
+++ b/schema/common/defaults/configuration/opensearch-dashboards.yml
@@ -0,0 +1,13 @@
+kind: configuration/opensearch-dashboards
+title: "OpenSearch-Dashboards"
+name: default
+specification:
+ dashboards_os_user: opensearch_dashboards
+ dashboards_os_group: opensearch_dashboards
+ dashboards_user: kibanaserver
+ dashboards_user_password: PASSWORD_TO_CHANGE
+ paths:
+ dashboards_home: /usr/share/opensearch-dashboards
+ dashboards_conf_dir: /usr/share/opensearch-dashboards/config
+ dashboards_plugin_bin_path: /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin
+ dashboards_log_dir: /var/log/opensearch-dashboards
diff --git a/schema/common/defaults/configuration/opensearch.yml b/schema/common/defaults/configuration/opensearch.yml
new file mode 100644
index 0000000000..a936930b0d
--- /dev/null
+++ b/schema/common/defaults/configuration/opensearch.yml
@@ -0,0 +1,31 @@
+kind: configuration/opensearch
+title: OpenSearch Config
+name: default
+specification:
+ cluster_name: EpiphanyOpenSearch
+ opensearch_os_user: opensearch
+ opensearch_os_group: opensearch
+ admin_password: PASSWORD_TO_CHANGE
+ kibanaserver_password: PASSWORD_TO_CHANGE
+ kibanaserver_user_active: true
+ logstash_password: PASSWORD_TO_CHANGE
+ logstash_user_active: true
+ demo_users_to_remove:
+ - kibanaro
+ - readall
+ - snapshotrestore
+ # - logstash
+ # - kibanaserver
+ paths:
+ opensearch_home: /usr/share/opensearch
+ opensearch_conf_dir: /usr/share/opensearch/config
+ opensearch_log_dir: /var/log/opensearch
+ opensearch_snapshots_dir: /var/lib/opensearch-snapshots
+ opensearch_data_dir: /var/lib/opensearch
+ opensearch_perftop_dir: /usr/share/opensearch/perftop
+ jvm_options:
+ Xmx: 1g
+ opensearch_security:
+ ssl:
+ transport:
+ enforce_hostname_verification: true
diff --git a/schema/common/validation/configuration/feature-mapping.yml b/schema/common/validation/configuration/feature-mapping.yml
index 85b954b095..f6791f4b43 100644
--- a/schema/common/validation/configuration/feature-mapping.yml
+++ b/schema/common/validation/configuration/feature-mapping.yml
@@ -55,7 +55,7 @@ properties:
type: array
items:
type: string
- opendistro_for_elasticsearch:
+ opensearch:
type: array
items:
type: string
diff --git a/schema/common/validation/configuration/filebeat.yml b/schema/common/validation/configuration/filebeat.yml
index 02c7af95dc..6c0597f390 100644
--- a/schema/common/validation/configuration/filebeat.yml
+++ b/schema/common/validation/configuration/filebeat.yml
@@ -3,7 +3,7 @@ title: "Filebeat specification schema"
description: "Filebeat specification schema"
type: object
properties:
- kibana:
+ opensearch:
type: object
properties:
dashboards:
diff --git a/schema/common/validation/configuration/firewall.yml b/schema/common/validation/configuration/firewall.yml
index 82148a9453..9a90b5f0ce 100644
--- a/schema/common/validation/configuration/firewall.yml
+++ b/schema/common/validation/configuration/firewall.yml
@@ -89,7 +89,7 @@ properties:
type: array
items:
type: string
- kibana:
+ opensearch_dashboards:
type: object
properties:
enabled:
@@ -134,7 +134,7 @@ properties:
type: array
items:
type: string
- opendistro_for_elasticsearch:
+ opensearch:
type: object
properties:
enabled:
diff --git a/schema/common/validation/configuration/kibana.yml b/schema/common/validation/configuration/kibana.yml
deleted file mode 100644
index 17b77c2e15..0000000000
--- a/schema/common/validation/configuration/kibana.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-"$id": "#/specification"
-title: "Kibana specification schema"
-description: "Kibana specification schema"
-type: object
-properties:
- kibana_log_dir:
- type: string
diff --git a/schema/common/validation/configuration/logging.yml b/schema/common/validation/configuration/logging.yml
index 2a434160a0..12dcf7bea8 100644
--- a/schema/common/validation/configuration/logging.yml
+++ b/schema/common/validation/configuration/logging.yml
@@ -32,7 +32,7 @@ properties:
properties:
Xmx:
type: string
- opendistro_security:
+ opensearch_security:
type: object
properties:
ssl:
diff --git a/schema/common/validation/configuration/opensearch-dashboards.yml b/schema/common/validation/configuration/opensearch-dashboards.yml
new file mode 100644
index 0000000000..f16250fbf2
--- /dev/null
+++ b/schema/common/validation/configuration/opensearch-dashboards.yml
@@ -0,0 +1,24 @@
+"$id": "#/specification"
+title: "OpenSearch Dashboards specification schema"
+description: "OpenSearch Dashboards specification schema"
+type: object
+properties:
+ dashboards_os_user:
+ type: string
+ dashboards_os_group:
+ type: string
+ dashboards_user:
+ type: string
+ dashboards_user_password:
+ type: string
+ paths:
+ type: object
+ properties:
+ dashboards_home:
+ type: string
+ dashboards_conf_dir:
+ type: string
+ dashboards_plugin_bin_path:
+ type: string
+ dashboards_log_dir:
+ type: string
diff --git a/schema/common/validation/configuration/opendistro-for-elasticsearch.yml b/schema/common/validation/configuration/opensearch.yml
similarity index 65%
rename from schema/common/validation/configuration/opendistro-for-elasticsearch.yml
rename to schema/common/validation/configuration/opensearch.yml
index 3992bc36ab..83e2d77618 100644
--- a/schema/common/validation/configuration/opendistro-for-elasticsearch.yml
+++ b/schema/common/validation/configuration/opensearch.yml
@@ -1,12 +1,14 @@
"$id": "#/specification"
-title: "Opendistro-for-elasticsearch specification schema"
-description: "Opendistro-for-elasticsearch specification schema"
+title: "opensearch schema"
+description: "OpenSearch specification schema"
type: object
properties:
cluster_name:
type: string
- clustered:
- type: boolean
+ opensearch_os_user:
+ type: string
+ opensearch_os_group:
+ type: string
admin_password:
type: string
kibanaserver_password:
@@ -24,18 +26,24 @@ properties:
paths:
type: object
properties:
- data:
+ opensearch_home:
+ type: string
+ opensearch_conf_dir:
+ type: string
+ opensearch_log_dir:
+ type: string
+ opensearch_snapshots_dir:
type: string
- repo:
+ opensearch_data_dir:
type: string
- logs:
+ opensearch_perftop_dir:
type: string
jvm_options:
type: object
properties:
Xmx:
type: string
- opendistro_security:
+ opensearch_security:
type: object
properties:
ssl:
diff --git a/tests/spec/spec/kibana/kibana_spec.rb b/tests/spec/spec/kibana/kibana_spec.rb
deleted file mode 100644
index b6f79e839b..0000000000
--- a/tests/spec/spec/kibana/kibana_spec.rb
+++ /dev/null
@@ -1,89 +0,0 @@
-require 'spec_helper'
-
-# Configurable passwords for ES users were introduced in v0.10.0.
-# For testing upgrades, we use the default password for now but it should be read from kibana.yml (remote host).
-es_kibanaserver_user_password = readDataYaml('configuration/logging')['specification']['kibanaserver_password'] || 'kibanaserver'
-es_kibanaserver_user_is_active = readDataYaml('configuration/logging')['specification']['kibanaserver_user_active']
-es_kibanaserver_user_is_active = true if es_kibanaserver_user_is_active.nil?
-
-es_api_port = 9200
-kibana_default_port = 5601
-
-describe 'Check if Kibana package is installed' do
- describe package('opendistroforelasticsearch-kibana') do
- it { should be_installed }
- end
-end
-
-describe 'Check if Kibana service is running' do
- describe service('kibana') do
- it { should be_enabled }
- it { should be_running }
- end
-end
-
-describe 'Check if Kibana user exists' do
- describe group('kibana') do
- it { should exist }
- end
- describe user('kibana') do
- it { should exist }
- it { should belong_to_group 'kibana' }
- end
-end
-
-describe 'Check Kibana directories and config files' do
- describe file('/etc/kibana') do
- it { should exist }
- it { should be_a_directory }
- end
- describe file('/etc/kibana/kibana.yml') do
- it { should exist }
- it { should be_a_file }
- end
- describe file('/etc/logrotate.d/kibana') do
- it { should exist }
- it { should be_a_file }
- end
-end
-
-describe 'Check if non-empty Kibana log file exists' do
- describe command('find /var/log/kibana -maxdepth 1 -name kibana.log* -size +0 -type f | wc -l') do
- its(:exit_status) { should eq 0 }
- its('stdout.to_i') { should > 0 }
- end
-end
-
-if es_kibanaserver_user_is_active
- listInventoryHosts('logging').each do |val|
- describe 'Check the connection to the Elasticsearch hosts' do
- let(:disable_sudo) { false }
- describe command("curl -k -u kibanaserver:#{es_kibanaserver_user_password} -o /dev/null -s -w '%{http_code}' https://#{val}:#{es_api_port}") do
- it 'is expected to be equal' do
- expect(subject.stdout.to_i).to eq 200
- end
- end
- end
- end
-
- listInventoryHosts('kibana').each do |val|
- describe 'Check Kibana app HTTP status code' do
- let(:disable_sudo) { false }
- describe command("curl -u kibanaserver:#{es_kibanaserver_user_password} -o /dev/null -s -w '%{http_code}' http://#{val}:#{kibana_default_port}/app/kibana") do
- it 'is expected to be equal' do
- expect(subject.stdout.to_i).to eq 200
- end
- end
- end
- end
-end
-
-listInventoryHosts('kibana').each do |val|
- describe 'Check Kibana health' do
- let(:disable_sudo) { false }
- describe command("curl http://#{val}:#{kibana_default_port}/api/status") do
- its(:stdout_as_json) { should include('status' => include('overall' => include('state' => 'green'))) }
- its(:exit_status) { should eq 0 }
- end
- end
-end
diff --git a/tests/spec/spec/logging/logging_spec.rb b/tests/spec/spec/logging/logging_spec.rb
index 617f74bc59..54fa551981 100644
--- a/tests/spec/spec/logging/logging_spec.rb
+++ b/tests/spec/spec/logging/logging_spec.rb
@@ -1,41 +1,57 @@
require 'spec_helper'
-
# Configurable passwords for ES users were introduced in v0.10.0.
# For testing upgrades, we use the default password for now but we're going to switch to TLS auth.
+es_kibanaserver_user_password = readDataYaml('configuration/logging')['specification']['kibanaserver_password'] || 'kibanaserver'
+es_kibanaserver_user_is_active = readDataYaml('configuration/logging')['specification']['kibanaserver_user_active']
+es_kibanaserver_user_is_active = true if es_kibanaserver_user_is_active.nil?
es_admin_password = readDataYaml('configuration/logging')['specification']['admin_password'] || 'admin'
-
es_rest_api_port = 9200
es_transport_port = 9300
+opensearch_dashboards_port = 5601
+
+describe 'Check if opensearch service is running' do
+ describe service('opensearch') do
+ it { should be_enabled }
+ it { should be_running }
+ end
+end
-describe 'Check if Elasticsearch service is running' do
- describe service('elasticsearch') do
+describe 'Check if opensearch-dashboards service is running' do
+ describe service('opensearch-dashboards') do
it { should be_enabled }
it { should be_running }
end
end
-describe 'Check if elasticsearch user exists' do
- describe group('elasticsearch') do
+describe 'Check if opensearch user exists' do
+ describe group('opensearch') do
it { should exist }
end
- describe user('elasticsearch') do
+ describe user('opensearch') do
it { should exist }
- it { should belong_to_group 'elasticsearch' }
+ it { should belong_to_group 'opensearch' }
+ end
+end
+describe 'Check if opensearch_dashboards user exists' do
+ describe group('opensearch_dashboards') do
+ it { should exist }
+ end
+ describe user('opensearch_dashboards') do
+ it { should exist }
+ it { should belong_to_group 'opensearch_dashboards' }
end
end
-
describe 'Check Elasticsearch directories and config files' do
let(:disable_sudo) { false }
- describe file('/etc/elasticsearch') do
+ describe file('/usr/share/opensearch') do
it { should exist }
it { should be_a_directory }
end
- describe file('/etc/elasticsearch/elasticsearch.yml') do
+ describe file('/usr/share/opensearch/config/opensearch.yml') do
it { should exist }
it { should be_a_file }
end
end
-
describe 'Check if the ports are open' do
let(:disable_sudo) { false }
describe port(es_rest_api_port) do
@@ -55,10 +71,7 @@
end
end
end
-end
-
-listInventoryHosts('logging').each do |val|
- describe 'Check Elasticsearch health' do
+ describe 'Check OpenSearch health' do
let(:disable_sudo) { false }
describe command("curl -k -u admin:#{es_admin_password} https://#{val}:#{es_rest_api_port}/_cluster/health?pretty=true") do
its(:stdout_as_json) { should include('status' => /green|yellow/) }
@@ -66,4 +79,19 @@
its(:exit_status) { should eq 0 }
end
end
+ describe 'Check OpenSearch Dashboard HTTP status code' do
+ let(:disable_sudo) { false }
+ describe command("curl -u kibanaserver:#{es_kibanaserver_user_password} -o /dev/null -s -w '%{http_code}' http://#{val}:#{opensearch_dashboards_port}/app/login") do
+ it 'is expected to be equal' do
+ expect(subject.stdout.to_i).to eq 200
+ end
+ end
+ end
+ describe 'Check OpenSearch Dashboards health' do
+ let(:disable_sudo) { false }
+ describe command("curl http://#{val}:#{opensearch_dashboards_port}/api/status") do
+ its(:stdout_as_json) { should include('status' => include('overall' => include('state' => 'green'))) }
+ its(:exit_status) { should eq 0 }
+ end
+ end
end
diff --git a/tests/spec/spec/opensearch/opensearch_spec.rb b/tests/spec/spec/opensearch/opensearch_spec.rb
new file mode 100644
index 0000000000..8138c69389
--- /dev/null
+++ b/tests/spec/spec/opensearch/opensearch_spec.rb
@@ -0,0 +1,66 @@
+require 'spec_helper'
+# Configurable passwords for ES users were introduced in v0.10.0.
+# For testing upgrades, we use the default password for now but we're going to switch to TLS auth.
+es_kibanaserver_user_password = readDataYaml('configuration/opensearch')['specification']['kibanaserver_password'] || 'kibanaserver'
+es_kibanaserver_user_is_active = readDataYaml('configuration/opensearch')['specification']['kibanaserver_user_active']
+es_kibanaserver_user_is_active = true if es_kibanaserver_user_is_active.nil?
+es_admin_password = readDataYaml('configuration/opensearch')['specification']['admin_password'] || 'admin'
+es_rest_api_port = 9200
+es_transport_port = 9300
+
+describe 'Check if opensearch service is running' do
+ describe service('opensearch') do
+ it { should be_enabled }
+ it { should be_running }
+ end
+end
+
+describe 'Check if opensearch user exists' do
+ describe group('opensearch') do
+ it { should exist }
+ end
+ describe user('opensearch') do
+ it { should exist }
+ it { should belong_to_group 'opensearch' }
+ end
+end
+
+describe 'Check OpenSearch directories and config files' do
+ let(:disable_sudo) { false }
+ describe file('/usr/share/opensearch') do
+ it { should exist }
+ it { should be_a_directory }
+ end
+ describe file('/usr/share/opensearch/config/opensearch.yml') do
+ it { should exist }
+ it { should be_a_file }
+ end
+end
+describe 'Check if the ports are open' do
+ let(:disable_sudo) { false }
+ describe port(es_rest_api_port) do
+ it { should be_listening }
+ end
+ describe port(es_transport_port) do
+ it { should be_listening }
+ end
+end
+
+listInventoryHosts('opensearch').each do |val|
+ describe 'Check OpenSearch nodes status codes' do
+ let(:disable_sudo) { false }
+ describe command("curl -k -u admin:#{es_admin_password} -o /dev/null -s -w '%{http_code}' https://#{val}:#{es_rest_api_port}") do
+ it 'is expected to be equal' do
+ expect(subject.stdout.to_i).to eq 200
+ end
+ end
+ end
+ describe 'Check OpenSearch health' do
+ let(:disable_sudo) { false }
+ describe command("curl -k -u admin:#{es_admin_password} https://#{val}:#{es_rest_api_port}/_cluster/health?pretty=true") do
+ its(:stdout_as_json) { should include('status' => /green|yellow/) }
+ its(:stdout_as_json) { should include('number_of_nodes' => countInventoryHosts('opensearch')) }
+ its(:exit_status) { should eq 0 }
+ end
+ end
+end
diff --git a/tests/unit/helpers/test_data_loader.py b/tests/unit/helpers/test_data_loader.py
index d8239d36b1..e279749c9e 100644
--- a/tests/unit/helpers/test_data_loader.py
+++ b/tests/unit/helpers/test_data_loader.py
@@ -47,7 +47,7 @@
'postgresql': {'count': 1},
'load_balancer': {'count': 1},
'rabbitmq': {'count': 1},
- 'opendistro_for_elasticsearch': {'count': 1}
+ 'opensearch': {'count': 1}
}
}
}