[2.0.1] Split available_roles and roles_mapping into separate yaml documents … #3119

Merged: 1 commit, May 17, 2022
2 changes: 1 addition & 1 deletion ansible/playbooks/repository.yml
@@ -1,5 +1,5 @@
---
# This playbook is empty by purpose, just to enable repository role in configuration/feature-mapping
# This playbook is empty by purpose, just to enable repository role in configuration/features
# to populate defaults/configuration to Ansible vars
- hosts: "!all"
tasks: []
8 changes: 4 additions & 4 deletions cli/src/ansible/AnsibleInventoryCreator.py
@@ -43,12 +43,12 @@ def get_inventory(self):
return self.group_duplicated(inventory)

def get_roles_for_feature(self, component_key):
features_map = select_single(self.config_docs, lambda x: x.kind == 'configuration/feature-mapping')
return features_map.specification.roles_mapping[component_key]
features_map = select_single(self.config_docs, lambda x: x.kind == 'configuration/feature-mappings')
return features_map.specification[component_key]

def get_available_roles(self):
features_map = select_single(self.config_docs, lambda x: x.kind == 'configuration/feature-mapping')
return features_map.specification.available_roles
features = select_single(self.config_docs, lambda x: x.kind == 'configuration/features')
return features.specification

def get_enabled_roles(self):
roles = self.get_available_roles()
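For readers unfamiliar with the document helpers, the sketch below shows what the two rewritten lookups now return. It is a standalone illustration, not code from this PR: `select_single` is a simplified stand-in for the helper in `cli/src/helpers/doc_list_helpers`, the documents are plain `SimpleNamespace` objects shaped the way these lookups index them, and the `enabled` filter at the end only mirrors what `get_enabled_roles` presumably does with the returned list.

```python
from types import SimpleNamespace

def select_single(docs, predicate):
    """Simplified stand-in: return the single document matching the predicate."""
    matches = [doc for doc in docs if predicate(doc)]
    assert len(matches) == 1, f'expected exactly one match, got {len(matches)}'
    return matches[0]

config_docs = [
    SimpleNamespace(kind='configuration/features',
                    specification=[SimpleNamespace(name='repository', enabled=True),
                                   SimpleNamespace(name='postgresql', enabled=False)]),
    SimpleNamespace(kind='configuration/feature-mappings',
                    specification={'kubernetes_master': ['repository', 'kubernetes-master']}),
]

# Roles for a component are now read straight from the feature-mappings specification ...
mappings = select_single(config_docs, lambda x: x.kind == 'configuration/feature-mappings')
print(mappings.specification['kubernetes_master'])            # ['repository', 'kubernetes-master']

# ... while the feature list (with its enabled flags) is the features specification itself.
features = select_single(config_docs, lambda x: x.kind == 'configuration/features')
print([f.name for f in features.specification if f.enabled])  # ['repository']
```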
24 changes: 14 additions & 10 deletions cli/src/commands/Apply.py
@@ -1,5 +1,6 @@
import os
import sys
from typing import Dict

from cli.src.ansible.AnsibleRunner import AnsibleRunner
from cli.src.helpers.build_io import (get_build_path, get_inventory_path,
@@ -213,23 +214,26 @@ def assert_no_master_downscale(self):
raise Exception("ControlPlane downscale is not supported yet. Please revert your 'kubernetes_master' count to previous value or increase it to scale up Kubernetes.")


def __load_configuration_doc(self, kind: str) -> Dict:
doc = select_first(self.input_docs, lambda x: x.kind == kind)
if not doc:
return load_schema_obj(schema_types.DEFAULT, 'common', kind)

with DefaultMerger([doc]) as doc_merger:
return doc_merger.run()[0]

def assert_no_postgres_nodes_number_change(self):
feature_mapping = select_first(self.input_docs, lambda x: x.kind == 'configuration/feature-mapping')
if feature_mapping:
with DefaultMerger([feature_mapping]) as doc_merger:
feature_mapping = doc_merger.run()
feature_mapping = feature_mapping[0]
else:
feature_mapping = load_schema_obj(schema_types.DEFAULT, 'common', 'configuration/feature-mapping')
feature_mappings = self.__load_configuration_doc('configuration/feature-mappings')
features = self.__load_configuration_doc('configuration/features')

components = self.cluster_model.specification.components
if self.inventory:
next_postgres_node_count = 0
prev_postgres_node_count = len(self.inventory.list_hosts(pattern='postgresql'))
postgres_available = [x for x in feature_mapping.specification.available_roles if x.name == 'postgresql']
postgres_available = [x for x in features.specification if x.name == 'postgresql']
if postgres_available[0].enabled:
for key, roles in feature_mapping.specification.roles_mapping.items():
if ('postgresql') in roles and key in components:
for key, features in feature_mappings.specification.items():
if ('postgresql') in features and key in components:
next_postgres_node_count = next_postgres_node_count + components[key].count

if prev_postgres_node_count > 0 and prev_postgres_node_count != next_postgres_node_count:
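To make the reworked check easier to follow, here is a trimmed-down, self-contained version of the PostgreSQL node-count logic above, run against mock documents. The document shapes and the exception message are illustrative only; the real code loads the documents via `__load_configuration_doc` and reads the previous count from the Ansible inventory.

```python
from types import SimpleNamespace

# Mock documents shaped the way the check above indexes them.
features = SimpleNamespace(specification=[SimpleNamespace(name='postgresql', enabled=True)])
feature_mappings = SimpleNamespace(specification={
    'postgresql': ['postgresql', 'node-exporter'],
    'kubernetes_master': ['kubernetes-master'],
})
components = {'postgresql': SimpleNamespace(count=2), 'kubernetes_master': SimpleNamespace(count=1)}
prev_postgres_node_count = 3  # in the real code: len(self.inventory.list_hosts(pattern='postgresql'))

next_postgres_node_count = 0
postgres_available = [x for x in features.specification if x.name == 'postgresql']
if postgres_available[0].enabled:
    for key, feature_list in feature_mappings.specification.items():
        if 'postgresql' in feature_list and key in components:
            next_postgres_node_count += components[key].count

if prev_postgres_node_count > 0 and prev_postgres_node_count != next_postgres_node_count:
    # Illustrative message only; the real exception text is not shown in this hunk.
    raise Exception(f'PostgreSQL node count changed ({prev_postgres_node_count} -> '
                    f'{next_postgres_node_count}), which is not supported')
```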
3 changes: 3 additions & 0 deletions cli/src/commands/Init.py
@@ -49,6 +49,9 @@ def init(self):
config_docs = config_appender.run()

docs = [*config_docs, *infra_docs]
else:
with ConfigurationAppender(docs) as config_appender:
config_appender.add_feature_mappings()

# set the provider and version for all docs
for doc in docs:
75 changes: 46 additions & 29 deletions cli/src/schema/ConfigurationAppender.py
@@ -1,3 +1,5 @@
from typing import Callable, Dict, List

from cli.src.helpers.config_merger import merge_with_defaults
from cli.src.helpers.data_loader import load_schema_obj, schema_types
from cli.src.helpers.doc_list_helpers import select_first, select_single
@@ -6,45 +8,60 @@


class ConfigurationAppender(Step):
REQUIRED_DOCS = ['epiphany-cluster', 'configuration/feature-mapping', 'configuration/shared-config']
REQUIRED_DOCS = ['epiphany-cluster',
'configuration/features',
'configuration/feature-mappings',
'configuration/shared-config']

def __init__(self, input_docs):
super().__init__(__name__)
self.cluster_model = select_single(input_docs, lambda x: x.kind == 'epiphany-cluster')
self.input_docs = input_docs
self.__cluster_model: Dict = select_single(input_docs, lambda x: x.kind == 'epiphany-cluster')
self.__input_docs: List[Dict] = input_docs

def run(self):
configuration_docs = []
def __append_config(self, config_docs: List[Dict], document: Dict):
document['version'] = VERSION
config_docs.append(document)

def __add_doc(self, config_docs: List[Dict], document_kind: str):
doc = select_first(self.__input_docs, lambda x, kind=document_kind: x.kind == kind)
if doc is None:
doc = load_schema_obj(schema_types.DEFAULT, 'common', document_kind)
self.logger.info(f'Adding: {doc.kind}')

self.__append_config(config_docs, doc)

def __feature_selector(self, feature_key: str, config_selector: str) -> Callable:
return lambda x, key=feature_key, selector=config_selector: x.kind == f'configuration/{key}' and x.name == selector

def append_config(doc):
doc['version'] = VERSION
configuration_docs.append(doc)
def add_feature_mappings(self):
feature_mappings: List[Dict] = []
self.__add_doc(feature_mappings, 'configuration/feature-mappings')

if feature_mappings is not None:
self.__input_docs.append(feature_mappings[0])

def run(self):
configuration_docs: List[Dict] = []

for document_kind in ConfigurationAppender.REQUIRED_DOCS:
doc = select_first(self.input_docs, lambda x: x.kind == document_kind)
if doc is None:
doc = load_schema_obj(schema_types.DEFAULT, 'common', document_kind)
self.logger.info("Adding: " + doc.kind)
append_config(doc)
else:
append_config(doc)

for component_key, component_value in self.cluster_model.specification.components.items():
self.__add_doc(configuration_docs, document_kind)

for component_key, component_value in self.__cluster_model.specification.components.items():
if component_value.count < 1:
continue

features_map = select_first(configuration_docs, lambda x: x.kind == 'configuration/feature-mapping')
feature_mappings = select_first(configuration_docs, lambda x: x.kind == 'configuration/feature-mappings')
config_selector = component_value.configuration
for feature_key in features_map.specification.roles_mapping[component_key]:
config = select_first(self.input_docs, lambda x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
if config is not None:
append_config(config)
if config is None:
config = select_first(configuration_docs, lambda
x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
if config is None:
config = merge_with_defaults('common', 'configuration/' + feature_key, config_selector, self.input_docs)
self.logger.info("Adding: " + config.kind)
append_config(config)
for feature_key in feature_mappings.specification.mappings[component_key]:
first_input_docs_config = select_first(self.__input_docs, self.__feature_selector(feature_key, config_selector))
if first_input_docs_config is not None:
self.__append_config(configuration_docs, first_input_docs_config)
else:
first_config = select_first(configuration_docs, self.__feature_selector(feature_key, config_selector))

if first_config is None:
merged_config = merge_with_defaults('common', f'configuration/{feature_key}', config_selector, self.__input_docs)
self.logger.info(f'Adding: {merged_config.kind}')
self.__append_config(configuration_docs, merged_config)

return configuration_docs
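A side note on the new `__add_doc` and `__feature_selector` lambdas: binding `kind=document_kind`, `key=feature_key` and `selector=config_selector` as default arguments freezes each value at definition time. Here the values are function parameters, so the idiom is mostly defensive (and keeps linters quiet), but the closure pitfall it guards against looks like this minimal, standalone example:

```python
feature_keys = ['repository', 'firewall', 'postgresql']

late_bound = [lambda x: x == f'configuration/{key}' for key in feature_keys]
early_bound = [lambda x, key=key: x == f'configuration/{key}' for key in feature_keys]

# Late-bound lambdas all see the final value of `key` ...
print([f('configuration/repository') for f in late_bound])   # [False, False, False]
# ... while default-argument binding captures the value current when each lambda was created.
print([f('configuration/repository') for f in early_bound])  # [True, False, False]
```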
8 changes: 7 additions & 1 deletion docs/changelogs/CHANGELOG-2.0.md
@@ -4,6 +4,12 @@

### Added

- [#3097](https://github.com/epiphany-platform/epiphany/issues/3097) - Split available_roles and roles_mapping into separate yaml documents

### Breaking changes

- Schema `configuration/feature-mapping` changed. The document was split into two separate documents: `configuration/features` and `configuration/feature-mappings`.
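For clusters carrying an old manifest, a hypothetical migration helper (not part of epicli) could split the legacy document along the lines shown in the updated howto docs below, assuming the manifest has already been loaded into plain dictionaries (for example with `yaml.safe_load_all`):

```python
from typing import Dict, Tuple

def split_feature_mapping(old_doc: Dict) -> Tuple[Dict, Dict]:
    """Split a legacy configuration/feature-mapping document into the two new documents."""
    common = {k: old_doc[k] for k in ('provider', 'name') if k in old_doc}

    # available_roles becomes the features list of configuration/features ...
    features_doc = {'kind': 'configuration/features', **common,
                    'specification': {'features': old_doc['specification']['available_roles']}}
    # ... and roles_mapping becomes the mappings of configuration/feature-mappings.
    mappings_doc = {'kind': 'configuration/feature-mappings', **common,
                    'specification': {'mappings': old_doc['specification']['roles_mapping']}}
    return features_doc, mappings_doc
```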

## [2.0.0] 2022-05-09

### Added
@@ -58,7 +64,7 @@
- [#2803](https://github.com/epiphany-platform/epiphany/issues/2803) - Refactor: rename 'kafka_var' setting
- [#2995](https://github.com/epiphany-platform/epiphany/issues/2995) - Update expired RHUI client certificate before installing any RHEL packages
- [#3049](https://github.com/epiphany-platform/epiphany/issues/3049) - HAProxy upgrade fails on re-run trying to remove haproxy_exporter
- [#3006](https://github.com/epiphany-platform/epiphany/issues/3006) - install 'containerd.io=1.4.12-*' failed, when upgrade from v1.3.0 to 2.0.0dev
- [#3006](https://github.com/epiphany-platform/epiphany/issues/3006) - install `containerd.io=1.4.12-*` failed, when upgrade from v1.3.0 to 2.0.0dev
- [#3065](https://github.com/epiphany-platform/epiphany/issues/3065) - Flag `delete_os_disk_on_termination` has no effect when removing cluster

### Updated
67 changes: 35 additions & 32 deletions docs/home/howto/CLUSTER.md
@@ -57,10 +57,9 @@ Disable:
2. Prepend `kubernetes_master` mapping (or any other mapping if you don't deploy Kubernetes) with:

```yaml
kind: configuration/feature-mapping
kind: configuration/feature-mappings
specification:
...
roles_mapping:
mappings:
...
kubernetes_master:
- repository
@@ -290,12 +289,12 @@ specification:
kubernetes_node:
count: 2
---
kind: configuration/feature-mapping
title: "Feature mapping to roles"
kind: configuration/feature-mappings
title: "Feature mapping to components"
provider: <provider>
name: default
specification:
roles_mapping:
mappings:
kubernetes_master:
- repository
- image-registry
@@ -671,40 +670,44 @@ specification:

Epiphany gives you the ability to define custom components. This allows you to define a custom set of roles for a component you want to use in your cluster. It can be useful when, for example, you want to maximize usage of the machines you have at your disposal.

The first thing you will need to do is define it in the `configuration/feature-mapping` configuration. To get this configuration you can run `epicli init ... --full` command. In the `available_roles` roles section you can see all the available roles that Epiphany provides. The `roles_mapping` is where all the Epiphany components are defined and were you need to add your custom components.
The first thing you will need to do is define it in the `configuration/features` and `configuration/feature-mappings` configurations. To get these configurations, you can run the `epicli init ... --full` command. In the `configuration/features` doc you can see all the available features that Epiphany provides. The `configuration/feature-mappings` doc is where all the Epiphany components are defined and where you can add your custom components.

Below are parts of an example `configuration/feature-mapping` were we define a new `single_machine_new` component. We want to use Kafka instead of RabbitMQ and don`t need applications and postgres since we don't want a Keycloak deployment:
Below are parts of example `configuration/features` and `configuration/feature-mappings` docs where we define a new `single_machine_new` component. We want to use Kafka instead of RabbitMQ and don't need applications and postgres since we don't want a Keycloak deployment:

```yaml
kind: configuration/feature-mapping
title: Feature mapping to roles
kind: configuration/features
title: "Features to be enabled/disabled"
name: default
specification:
available_roles: # All entries here represent the available roles within Epiphany
- name: repository
enabled: yes
- name: firewall
enabled: yes
- name: image-registry
...
roles_mapping: # All entries here represent the default components provided with Epiphany
...
features: # All entries here represent the available features within Epiphany
- name: repository
enabled: yes
- name: firewall
enabled: yes
- name: image-registry
...
---
kind: configuration/feature-mappings
title: "Feature mapping to components"
name: default
specification:
mappings: # All entries here represent the default components provided with Epiphany
single_machine:
- repository
- image-registry
- kubernetes-master
- applications
- rabbitmq
- postgresql
- firewall
- repository
- image-registry
- kubernetes-master
- applications
- rabbitmq
- postgresql
- firewall
# Below is the new single_machine_new definition
single_machine_new:
- repository
- image-registry
- kubernetes-master
- kafka
- firewall
...
- repository
- image-registry
- kubernetes-master
- kafka
- firewall
...
```
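When adding a custom component like `single_machine_new`, every feature it lists should correspond to one of the features declared in `configuration/features`. A quick illustrative check (not part of epicli), assuming the completed manifest above is saved as `manifest.yml` and PyYAML is available:

```python
import yaml  # assumption: PyYAML is installed

with open('manifest.yml') as stream:  # assumption: the completed documents above live here
    docs = {doc['kind']: doc for doc in yaml.safe_load_all(stream) if doc}

declared = {feature['name'] for feature in docs['configuration/features']['specification']['features']}
mappings = docs['configuration/feature-mappings']['specification']['mappings']

for component, feature_list in mappings.items():
    unknown = [feature for feature in feature_list if feature not in declared]
    if unknown:
        print(f'{component} references undeclared features: {unknown}')
```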

Once defined, the new `single_machine_new` component can be used inside the `epiphany-cluster` configuration:
6 changes: 3 additions & 3 deletions docs/home/howto/DATABASES.md
@@ -491,11 +491,11 @@

By default, Kibana is deployed only for the `logging` component. If you want to deploy it for `opendistro_for_elasticsearch`, you have to modify the feature mapping. Use the configuration below in your manifest.

```yaml
kind: configuration/feature-mapping
title: "Feature mapping to roles"
kind: configuration/feature-mappings
title: "Feature mapping to components"
name: default
specification:
roles_mapping:
mappings:
opendistro_for_elasticsearch:
- opendistro-for-elasticsearch
- node-exporter
8 changes: 4 additions & 4 deletions docs/home/howto/KUBERNETES.md
@@ -143,15 +143,15 @@ specification:
count: 2
```

2. Enable `applications` in feature-mapping in initial configuration manifest.
2. Enable `applications` in `configuration/features` in initial configuration manifest.

```yaml
---
kind: configuration/feature-mapping
title: Feature mapping to roles
kind: configuration/features
title: "Features to be enabled/disabled"
name: default
specification:
available_roles:
features:
- name: applications
enabled: true
```
6 changes: 3 additions & 3 deletions docs/home/howto/MODULES.md
@@ -108,12 +108,12 @@ AWS:
rabbitmq:
count: 0
---
kind: configuration/feature-mapping
title: Feature mapping to roles
kind: configuration/feature-mappings
title: "Feature mapping to components"
name: your-cluster-name # <----- make unified with other places and build directory name
provider: any
specification:
roles_mapping:
mappings:
repository:
- repository
- image-registry
6 changes: 3 additions & 3 deletions docs/home/howto/UPGRADE.md
@@ -206,12 +206,12 @@ specification:
prefix: 'prefix'
title: Epiphany cluster Config
---
kind: configuration/feature-mapping
title: Feature mapping to roles
kind: configuration/feature-mappings
title: "Feature mapping to components"
provider: azure
name: default
specification:
roles_mapping:
mappings:
kubernetes_master:
- kubernetes-master
- helm