Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixes for #2944 and #2945 #2948

Merged
merged 7 commits into from
Feb 8, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 6 additions & 21 deletions cli/engine/ApplyEngine.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,10 @@
import os
import sys

from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager

from cli.version import VERSION
from cli.helpers.Step import Step
from cli.helpers.doc_list_helpers import select_single, select_all, select_first
from cli.helpers.build_io import save_manifest, load_manifest, get_inventory_path, get_manifest_path, get_build_path
from cli.helpers.build_io import save_manifest, load_manifest, get_inventory_path, get_manifest_path, get_build_path, load_inventory
from cli.helpers.yaml_helpers import safe_load_all
from cli.helpers.Log import Log
from cli.helpers.os_images import get_os_distro_normalized
Expand Down Expand Up @@ -39,7 +36,7 @@ def __init__(self, input_data):
self.infrastructure_docs = []
self.manifest_docs = []

self.__ping_retries: int = input_data.ping_retries
self.ping_retries: int = input_data.ping_retries
seriva marked this conversation as resolved.
Show resolved Hide resolved

def __enter__(self):
return self
Expand Down Expand Up @@ -121,7 +118,7 @@ def assert_no_master_downscale(self):
inventory_path = get_inventory_path(cluster_name)

if os.path.isfile(inventory_path):
existing_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)
existing_inventory = load_inventory(inventory_path)

both_present = all([
'kubernetes_master' in existing_inventory.list_groups(),
Expand Down Expand Up @@ -149,7 +146,7 @@ def assert_no_postgres_nodes_number_change(self):

if os.path.isfile(inventory_path):
next_postgres_node_count = 0
existing_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)
existing_inventory = load_inventory(inventory_path)
prev_postgres_node_count = len(existing_inventory.list_hosts(pattern='postgresql'))
postgres_available = [x for x in feature_mapping.specification.available_roles if x.name == 'postgresql']
if postgres_available[0].enabled:
Expand Down Expand Up @@ -196,7 +193,7 @@ def apply(self):

self.assert_consistent_os_family()

if not (self.skip_infrastructure or self.is_provider_any(self.cluster_model)):
if not (self.skip_infrastructure or self.cluster_model['provider'] == 'any'):
# Generate terraform templates
with TerraformTemplateGenerator(self.cluster_model, self.infrastructure_docs) as template_generator:
template_generator.run()
Expand All @@ -222,19 +219,7 @@ def apply(self):
# Run Ansible to provision infrastructure
if not(self.skip_config):
with AnsibleRunner(self.cluster_model, docs, ansible_options=self.ansible_options,
ping_retries=self.__ping_retries) as ansible_runner:
ping_retries=self.ping_retries) as ansible_runner:
ansible_runner.apply()

return 0

def dry_run(self):

self.process_input_docs()

self.process_configuration_docs()

return [*self.configuration_docs, *self.infrastructure_docs]

@staticmethod
def is_provider_any(cluster_model):
return cluster_model["provider"] == "any"
69 changes: 24 additions & 45 deletions cli/engine/InitEngine.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,11 @@
from cli.helpers.Step import Step
from cli.helpers.build_io import save_manifest, get_build_path
from cli.helpers.data_loader import load_all_schema_objs, types
from cli.engine.ApplyEngine import ApplyEngine
from cli.helpers.objdict_helpers import remove_value
from cli.version import VERSION
from cli.helpers.doc_list_helpers import select_all, select_single
from cli.helpers.doc_list_helpers import select_all
from cli.engine.schema.ConfigurationAppender import ConfigurationAppender
from cli.engine.schema.DefaultMerger import DefaultMerger


class InitEngine(Step):
Expand All @@ -25,23 +26,29 @@ def __exit__(self, exc_type, exc_value, traceback):
pass

def init(self):
input = load_all_schema_objs(types.DEFAULT, self.provider, 'configuration/minimal-cluster-config')
input[0].specification.name = self.name
# Load the minimal cluster-config doc and set the cluster name
docs = load_all_schema_objs(types.DEFAULT, self.provider, 'configuration/minimal-cluster-config')
docs[0].specification.name = self.name

# For full we also add the infrastructure and configuration documents
if self.is_full_config:
config = self.get_config_docs(input)
config_only = select_all(config, lambda x: not(x.kind.startswith('epiphany-cluster')))
if self.provider == 'any':
# for any provider we want to use the default config from minimal-cluster-config
cluster_model = select_single(input, lambda x: x.kind == 'epiphany-cluster')
# Merge with defaults
with DefaultMerger(docs) as doc_merger:
docs = doc_merger.run()

# Add infrastructure and configuration documents
if self.provider != 'any':
# Add VM infrastructure docs as these are most likely to be changed
infra_docs = load_all_schema_objs(types.DEFAULT, self.provider, 'infrastructure/virtual-machine')
else:
# for azure|aws provider we want to use the extended defaults cluster-config after dry run.
# TODO: We probably want this coming from separate documents since Azure and AWS overlap now...
cluster_model = select_single(config, lambda x: x.kind == 'epiphany-cluster')
infra = self.get_infra_docs(input)
docs = [cluster_model, *config_only, *infra]
else:
docs = [*input]
# For any provider, infrastructure docs are already part of the minimal-cluster-config template
infra_docs = select_all(docs, lambda x: x.kind.startswith('infrastructure/machine'))

# Add configuration documents
with ConfigurationAppender(docs) as config_appender:
config_docs = config_appender.run()

docs = [*config_docs, *infra_docs]

# set the provider and version for all docs
for doc in docs:
Expand All @@ -52,35 +59,7 @@ def init(self):
remove_value(docs, 'SET_BY_AUTOMATION')

# save document
save_manifest(docs, self.name, self.name+'.yml')
save_manifest(docs, self.name, f'{ self.name }.yml')

self.logger.info('Initialized new configuration and saved it to "' + os.path.join(get_build_path(self.name), self.name + '.yml') + '"')
return 0

def get_config_docs(self, input_docs):
cluster_config_path = save_manifest(input_docs, self.name, self.name + '.yml')
args = type('obj', (object,), {'file': cluster_config_path})()
args.ansible_forks = 10 # TODO: remove these workarounds
args.ping_retries = 5

# generate the config documents
with ApplyEngine(args) as build:
config = build.dry_run()

return config

def get_infra_docs(self, input_docs):
if self.provider == 'any':
# For any we can include the machine documents from the minimal-cluster-config
infra = select_all(input_docs, lambda x: x.kind.startswith('infrastructure/machine'))
else:
# VMs are currently the infrastructure documents the user might interact with for:
# - type/size
# - distro
# - network security rules
# ...
# So we add the defaults here.
# TODO: Check if we want to include possible other infrastructure documents.
infra = load_all_schema_objs(types.DEFAULT, self.provider, 'infrastructure/virtual-machine')

return infra
4 changes: 2 additions & 2 deletions cli/engine/UpgradeEngine.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def __init__(self, input_data):
self.backup_build_dir = ''
self.ansible_command = AnsibleCommand()
self.input_docs = []
self.__ping_retries: int = input_data.ping_retries
self.ping_retries: int = input_data.ping_retries

def __enter__(self):
super().__enter__()
Expand Down Expand Up @@ -88,7 +88,7 @@ def upgrade(self):
# Run Ansible to upgrade infrastructure
with AnsibleRunner(build_dir=self.build_dir, backup_build_dir=self.backup_build_dir,
ansible_options=self.ansible_options, config_docs=self.input_docs,
ping_retries=self.__ping_retries) as ansible_runner:
ping_retries=self.ping_retries) as ansible_runner:
ansible_runner.upgrade()

return 0
8 changes: 2 additions & 6 deletions cli/engine/ansible/AnsibleInventoryUpgrade.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager

from cli.helpers.Step import Step
from cli.helpers.build_io import get_inventory_path_for_build
from cli.models.AnsibleHostModel import AnsibleHostModel
from cli.models.AnsibleInventoryItem import AnsibleInventoryItem
from cli.helpers.build_io import save_inventory, load_manifest
from cli.helpers.objdict_helpers import dict_to_objdict
from cli.helpers.build_io import save_inventory, load_manifest, load_inventory
from cli.helpers.data_loader import load_schema_obj, types as data_types
from cli.helpers.doc_list_helpers import select_single
from cli.helpers.objdict_helpers import merge_objdict
Expand Down Expand Up @@ -56,7 +52,7 @@ def upgrade(self):
inventory_path = get_inventory_path_for_build(self.backup_build_dir)

self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
loaded_inventory = InventoryManager(loader = DataLoader(), sources=inventory_path)
loaded_inventory = load_inventory(inventory_path)

# move loaded inventory to templating structure
new_inventory = []
Expand Down
5 changes: 2 additions & 3 deletions cli/engine/ansible/AnsibleRunner.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def __init__(self, cluster_model=None, config_docs=None, build_dir=None, backup_
self.backup_build_dir = backup_build_dir
self.ansible_options = ansible_options
self.ansible_command = AnsibleCommand()
self.__ping_retries: int = ping_retries
self.ping_retries: int = ping_retries

def __enter__(self):
super().__enter__()
Expand Down Expand Up @@ -57,7 +57,7 @@ def pre_flight(self, inventory_path):
self.ansible_command.run_task_with_retries(inventory=inventory_path,
module="ping",
hosts="all",
retries=self.__ping_retries)
retries=self.ping_retries)

self.logger.info('Checking preflight conditions on each machine')
self.ansible_command.run_playbook_with_retries(inventory=inventory_path,
Expand Down Expand Up @@ -106,7 +106,6 @@ def apply(self):
# create inventory
inventory_creator = AnsibleInventoryCreator(self.cluster_model, self.config_docs)
inventory_creator.create()
time.sleep(10)

# create ansible.cfg
ansible_config_file_path = get_ansible_config_file_path(self.cluster_model.specification.name)
Expand Down
2 changes: 1 addition & 1 deletion cli/engine/schema/ConfigurationAppender.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@


class ConfigurationAppender(Step):
REQUIRED_DOCS = ['configuration/feature-mapping', 'configuration/shared-config', 'epiphany-cluster']
REQUIRED_DOCS = ['epiphany-cluster', 'configuration/feature-mapping', 'configuration/shared-config']

def __init__(self, input_docs):
super().__init__(__name__)
Expand Down
4 changes: 1 addition & 3 deletions cli/epicli.py
Original file line number Diff line number Diff line change
Expand Up @@ -490,9 +490,7 @@ def dump_external_debug_info(title, args):
dump_file.write('\n\n*****PYTHON******\n')
dump_file.write(f'python_version: {platform.python_version()}\n')
dump_file.write(f'python_build: {platform.python_build()}\n')
dump_file.write(f'python_revision: {platform.python_revision()}\n')
dump_file.write(f'python_compiler: {platform.python_compiler()}\n')
dump_file.write(f'python_branch: {platform.python_branch()}\n')
dump_file.write(f'python_implementation: {platform.python_implementation()}\n')

dump_external_debug_info('ANSIBLE VERSION', ['ansible', '--version'])
Expand All @@ -501,7 +499,7 @@ def dump_external_debug_info(title, args):
dump_external_debug_info('TERRAFORM VERSION', ['terraform', '--version'])
dump_external_debug_info('RUBY VERSION', ['ruby', '--version'])
dump_external_debug_info('RUBY GEM VERSION', ['gem', '--version'])
dump_external_debug_info('RUBY INSTALLED GEMS', ['gem', 'query', '--local'])
dump_external_debug_info('RUBY INSTALLED GEMS', ['gem', 'list', '--local'])

dump_file.write('\n\n*****LOG******\n')
log_path = os.path.join(get_output_path(), config.log_file)
Expand Down
7 changes: 7 additions & 0 deletions cli/helpers/build_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
from os.path import isfile, join
from distutils import dir_util

from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager

from cli.helpers.data_loader import load_template_file, load_yamls_file, types
from cli.helpers.yaml_helpers import dump_all, dump
from cli.helpers.Config import Config
Expand Down Expand Up @@ -53,6 +56,10 @@ def save_inventory(inventory, cluster_model, build_dir=None):
save_to_file(file_path, content)


def load_inventory(inventory_path):
    """Load and return an Ansible inventory from the given inventory file path."""
    data_loader = DataLoader()
    return InventoryManager(loader=data_loader, sources=inventory_path)


def save_ansible_config_file(ansible_config_file_settings, ansible_config_file_path):
template = load_template_file(types.ANSIBLE, '', ANSIBLE_CFG_FILE)
content = template.render(ansible_config_file_settings=ansible_config_file_settings)
Expand Down
3 changes: 2 additions & 1 deletion docs/changelogs/CHANGELOG-2.0.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,9 @@
- [#1569](https://github.com/epiphany-platform/epiphany/issues/1569) - Azure unmanaged disks not supported by Epiphany but there is misleading setting in the default configuration
- [#2832](https://github.com/epiphany-platform/epiphany/issues/2832) - Make the DoD checklist clear
- [#2853](https://github.com/epiphany-platform/epiphany/issues/2853) - Change autoscaling_group approach in AWS provider in favor of plain VM creation.

- [#2669](https://github.com/epiphany-platform/epiphany/issues/2669) - Restarting the installation process can cause certificate problems if K8s was not fully configured
- [#2944](https://github.com/epiphany-platform/epiphany/issues/2944) - Refactor InitEngine class to be agnostic to changes in ApplyEngine and UpgradeEngine
- [#2945](https://github.com/epiphany-platform/epiphany/issues/2945) - epicli apply sleeps 10 seconds after creating inventory

### Updated

Expand Down
22 changes: 11 additions & 11 deletions schema/any/defaults/epiphany-cluster.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,45 +11,45 @@ specification:
components:
kubernetes_master:
count: 1
machine: kubernetes-master-machine
machines: []
configuration: default
kubernetes_node:
count: 2
machine: kubernetes-node-machine
machines: []
configuration: default
logging:
count: 1
machine: logging-machine
machines: []
configuration: default
monitoring:
count: 1
machine: monitoring-machine
machines: []
configuration: default
kafka:
count: 2
machine: kafka-machine
machines: []
configuration: default
postgresql:
count: 0
machine: postgresql-machine
machines: []
configuration: default
load_balancer:
count: 1
machine: load-balancer-machine
machines: []
configuration: default
rabbitmq:
count: 0
machine: rabbitmq-machine
machines: []
configuration: default
opendistro_for_elasticsearch:
count: 0
machine: logging-machine
machines: []
configuration: default
repository:
count: 1
machine: repository-machine
machines: []
configuration: default
single_machine:
count: 0
machine: single-machine
machines: []
configuration: default