diff --git a/Makefile b/Makefile index 0a50973e5..999d9424b 100644 --- a/Makefile +++ b/Makefile @@ -90,6 +90,12 @@ copy-files: install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/quiescent/timeout install -m 644 srv/salt/ceph/tests/quiescent/*.sls $(DESTDIR)/srv/salt/ceph/tests/quiescent install -m 644 srv/salt/ceph/tests/quiescent/timeout/*.sls $(DESTDIR)/srv/salt/ceph/tests/quiescent/timeout + install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/migrate + install -m 644 srv/salt/ceph/tests/migrate/*.sls $(DESTDIR)/srv/salt/ceph/tests/migrate + install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/remove + install -m 644 srv/salt/ceph/tests/remove/*.sls $(DESTDIR)/srv/salt/ceph/tests/remove + install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/replace + install -m 644 srv/salt/ceph/tests/replace/*.sls $(DESTDIR)/srv/salt/ceph/tests/replace install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/restart install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/restart/mon install -d -m 755 $(DESTDIR)/srv/salt/ceph/tests/restart/mon/change @@ -147,6 +153,41 @@ copy-files: install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/tuned/off install -m 644 srv/salt/ceph/functests/1node/tuned/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/tuned install -m 644 srv/salt/ceph/functests/1node/tuned/off/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/tuned/off + # functests/1node + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node + install -m 644 srv/salt/ceph/functests/1node/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/apparmor + install -m 644 srv/salt/ceph/functests/1node/apparmor/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/apparmor + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/keyrings + install -m 644 srv/salt/ceph/functests/1node/keyrings/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/keyrings + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/macros + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/macros/os_switch + install -m 644 srv/salt/ceph/functests/1node/macros/os_switch/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/macros/os_switch + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/openstack + install -m 644 srv/salt/ceph/functests/1node/openstack/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/openstack + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/quiescent + install -m 644 srv/salt/ceph/functests/1node/quiescent/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/quiescent + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/filestore + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/filestore2 + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore2 + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore3 + install -m 644 srv/salt/ceph/functests/1node/migrate/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate + install -m 644 srv/salt/ceph/functests/1node/migrate/filestore/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/filestore + install -m 644 srv/salt/ceph/functests/1node/migrate/filestore2/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/filestore2 + install -m 644 srv/salt/ceph/functests/1node/migrate/bluestore/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore + install -m 644 
srv/salt/ceph/functests/1node/migrate/bluestore2/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore2 + install -m 644 srv/salt/ceph/functests/1node/migrate/bluestore3/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/migrate/bluestore3 + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/replace + install -m 644 srv/salt/ceph/functests/1node/replace/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/replace + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/remove + install -m 644 srv/salt/ceph/functests/1node/remove/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/remove + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/restart + install -m 644 srv/salt/ceph/functests/1node/restart/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/restart + install -d -m 755 $(DESTDIR)/srv/salt/ceph/functests/1node/tuned/off + install -m 644 srv/salt/ceph/functests/1node/tuned/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/tuned + install -m 644 srv/salt/ceph/functests/1node/tuned/off/*.sls $(DESTDIR)/srv/salt/ceph/functests/1node/tuned/off # docs install -d -m 755 $(DESTDIR)$(DOCDIR)/deepsea install -m 644 LICENSE $(DESTDIR)$(DOCDIR)/deepsea/ @@ -196,6 +237,11 @@ copy-files: # modules install -d -m 755 $(DESTDIR)/srv/salt/_modules install -m 644 srv/salt/_modules/*.py* $(DESTDIR)/srv/salt/_modules/ + # state modules + install -d -m 755 $(DESTDIR)/srv/salt/_states + install -d -m 755 $(DESTDIR)/srv/salt/_states/__pycache__ + install -m 644 srv/salt/_states/__pycache__/*.pyc $(DESTDIR)/srv/salt/_states/__pycache__ + install -m 644 srv/salt/_states/*.py* $(DESTDIR)/srv/salt/_states/ # state files install -d -m 755 $(DESTDIR)/srv/salt/ceph/admin install -m 644 srv/salt/ceph/admin/*.sls $(DESTDIR)/srv/salt/ceph/admin/ @@ -529,8 +575,10 @@ copy-files: install -m 644 srv/salt/ceph/remove/igw/auth/*.sls $(DESTDIR)/srv/salt/ceph/remove/igw/auth/ install -d -m 755 $(DESTDIR)/srv/salt/ceph/remove/mds install -m 644 srv/salt/ceph/remove/mds/*.sls $(DESTDIR)/srv/salt/ceph/remove/mds/ - install -d -m 755 $(DESTDIR)/srv/salt/ceph/remove/migrated - install -m 644 srv/salt/ceph/remove/migrated/*.sls $(DESTDIR)/srv/salt/ceph/remove/migrated/ + install -d -m 755 $(DESTDIR)/srv/salt/ceph/remove/destroyed + install -m 644 srv/salt/ceph/remove/destroyed/*.sls $(DESTDIR)/srv/salt/ceph/remove/destroyed/ + # Renamed for deprecation + ln -sf destroyed $(DESTDIR)/srv/salt/ceph/remove/migrated install -d -m 755 $(DESTDIR)/srv/salt/ceph/remove/mgr install -m 644 srv/salt/ceph/remove/mgr/*.sls $(DESTDIR)/srv/salt/ceph/remove/mgr/ install -d -m 755 $(DESTDIR)/srv/salt/ceph/remove/mon diff --git a/deepsea.spec.in b/deepsea.spec.in index aae928027..1632324ea 100644 --- a/deepsea.spec.in +++ b/deepsea.spec.in @@ -82,6 +82,8 @@ systemctl try-restart salt-api > /dev/null 2>&1 || : %dir %attr(0755, salt, salt) /srv/pillar/ceph/benchmarks/fio %dir %attr(0755, salt, salt) /srv/pillar/ceph/benchmarks/templates %dir /srv/salt/_modules +%dir /srv/salt/_states +%dir /srv/salt/_states/__pycache__ %dir /srv/modules %dir /srv/modules/modules %dir /srv/modules/runners @@ -242,13 +244,13 @@ systemctl try-restart salt-api > /dev/null 2>&1 || : %dir /srv/salt/ceph/redeploy/osds %dir /srv/salt/ceph/redeploy/nodes %dir /srv/salt/ceph/remove +%dir /srv/salt/ceph/remove/destroyed %dir /srv/salt/ceph/remove/ganesha %dir /srv/salt/ceph/remove/igw %dir /srv/salt/ceph/remove/igw/auth %dir /srv/salt/ceph/remove/mon %dir /srv/salt/ceph/remove/mds %dir /srv/salt/ceph/remove/mgr -%dir /srv/salt/ceph/remove/migrated %dir 
/srv/salt/ceph/remove/rgw %dir /srv/salt/ceph/remove/storage %dir /srv/salt/ceph/remove/storage/drain @@ -408,6 +410,7 @@ systemctl try-restart salt-api > /dev/null 2>&1 || : %config(noreplace) %attr(-, salt, salt) /etc/salt/master.d/*.conf /srv/modules/modules/*.py* /srv/modules/runners/*.py* +%exclude /srv/modules/runners/smoketests.py /srv/modules/utils/*.py* %config %attr(-, salt, salt) /srv/pillar/top.sls %config %attr(-, salt, salt) /srv/pillar/ceph/init.sls @@ -419,6 +422,8 @@ systemctl try-restart salt-api > /dev/null 2>&1 || : %config(noreplace) %attr(-, salt, salt) /srv/pillar/ceph/deepsea_minions.sls %config %attr(-, salt, salt) /srv/pillar/ceph/stack/stack.cfg /srv/salt/_modules/*.py* +/srv/salt/_states/*.py* +/srv/salt/_states/__pycache__/*.py* %config /srv/salt/ceph/admin/*.sls %config /srv/salt/ceph/admin/files/*.j2 %config /srv/salt/ceph/admin/key/*.sls @@ -565,7 +570,8 @@ systemctl try-restart salt-api > /dev/null 2>&1 || : %config /srv/salt/ceph/remove/igw/auth/*.sls %config /srv/salt/ceph/remove/mon/*.sls %config /srv/salt/ceph/remove/mds/*.sls -%config /srv/salt/ceph/remove/migrated/*.sls +%config /srv/salt/ceph/remove/destroyed/*.sls +%config /srv/salt/ceph/remove/migrated %config /srv/salt/ceph/remove/mgr/*.sls %config /srv/salt/ceph/remove/openattic/*.sls %config /srv/salt/ceph/remove/rgw/*.sls @@ -747,5 +753,4 @@ the README for more information. %{_libexecdir}/deepsea/qa /srv/salt/ceph/functests /srv/salt/ceph/tests - %changelog diff --git a/srv/modules/runners/advise.py b/srv/modules/runners/advise.py index fd0dbebbb..3688720d7 100644 --- a/srv/modules/runners/advise.py +++ b/srv/modules/runners/advise.py @@ -102,3 +102,63 @@ def networks(): __func_alias__ = { 'help_': 'help', } + + +def osds(): + """ + Inform the admin of pending changes and appropriate actions + + Note: I went with the mapping here where 'unconfigured' implies + 'deploy'. This is more about communicating with the maintainers, + although picking the "best" name and propagating it may be a solution. + + The deploy and redeploy are osd methods.
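      For illustration only (minion and device names are hypothetical), the
      output assembled from 'messages' and _tidy() below resembles:

          # salt-run advise.osds

          These devices will be deployed
          data1.ceph: /dev/sdb, /dev/sdc
          Run 'salt-run state.orch ceph.stage.3'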
+ """ + local = salt.client.LocalClient() + report = local.cmd('I@roles:storage', 'osd.report', + ['human=False'], tgt_type="compound") + + bold = '\033[1m' + endc = '\033[0m' + + unconfigured = _tidy('unconfigured', report) + changed = _tidy('changed', report) + unmounted = _tidy('unmounted', report) + + messages = {'deploy': {'header': '\nThese devices will be deployed', + 'footer': "Run 'salt-run state.orch ceph.stage.3'"}, + 'redeploy': {'header': "\nThe devices will be redeployed", + 'footer': "Run 'salt-run state.orch ceph.migrate.osds'"}, + 'stale': {'header': "\nVerify that these devices are in the desired state", + 'footer': "Run 'salt MINION osd.delete_grain ID' for a stale entry"}} + + if unconfigured: + print(messages['deploy']['header']) + print("{}{}{}".format(bold, unconfigured, endc)) + print(messages['deploy']['footer']) + + if changed: + print(messages['redeploy']['header']) + print("{}{}{}".format(bold, changed, endc)) + print(messages['redeploy']['footer']) + + if unmounted: + print(messages['stale']['header']) + print("{}{}{}".format(bold, unmounted, endc)) + print(messages['stale']['footer']) + + return "" + + +def _tidy(key, report): + """ + Return one line per minion: the minion name followed by its comma-separated devices, if present + """ + line = "" + for minion in sorted(report): + if report[minion][key]: + if len(minion) + len(", ".join(report[minion][key])) < 80: + line += "{}: {}\n".format(minion, ", ".join(sorted(report[minion][key]))) + else: + line += "\n{}:\n {}\n".format(minion, "\n ".join(sorted(report[minion][key]))) + return line diff --git a/srv/modules/runners/push.py b/srv/modules/runners/push.py index 350a007e1..d58daba22 100644 --- a/srv/modules/runners/push.py +++ b/srv/modules/runners/push.py @@ -94,6 +94,18 @@ def proposal(filename="/srv/pillar/ceph/proposals/policy.cfg", dryrun=False): return True +def organize(filename="/srv/pillar/ceph/proposals/policy.cfg"): + """ + Read the passed filename and organize the files by common subdirectories + """ + if not os.path.isfile(filename): + log.warning("{} is missing".format(filename)) + return "" + pillar_data = PillarData() + common = pillar_data.organize(filename) + return common + + def convert(filename="/srv/pillar/ceph/proposals/policy.cfg"): """ Convert the hardware profiles that policy.cfg is using and update diff --git a/srv/modules/runners/remove.py b/srv/modules/runners/remove.py index b3a3ac6e0..414d8fe98 100644 --- a/srv/modules/runners/remove.py +++ b/srv/modules/runners/remove.py @@ -18,72 +18,39 @@ def help_(): """ Usage """ - usage = ('salt-run remove.osd id:\n\n' + usage = ('salt-run remove.osd id [id ...][force=True]:\n\n' ' Removes an OSD\n' '\n\n') print(usage) return "" -def osd(id_, drain=False): +def osd(*args, **kwargs): """ - Removes an OSD gracefully + Remove an OSD gracefully or forcefully. Always attempt to remove + the ID from Ceph even if the OSD has already been removed from the minion.
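      Hedged examples (OSD IDs are illustrative), matching the usage string
      in help_():

          # salt-run remove.osd 3
          # salt-run remove.osd 3 7 force=True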
""" - runner_cli = salt.runner.RunnerClient( - salt.config.client_config('/etc/salt/master')) + result = __salt__['replace.osd'](*args, called=True, **kwargs) - if not runner_cli.cmd('disengage.check'): - log.error(('Safety is not disengaged...refusing to remove OSD', - ' run "salt-run disengage.safety" first' - ' THIS WILL CAUSE DATA LOSS.')) - return False + # Replace OSD exited early + if not result: + return "" - if id_ < 0: - log.error('Bogus id supplied...OSDs have IDs >= 0') - return False + master_minion = result['master_minion'] + osds = result['osds'] - local_cli = salt.client.LocalClient() + local = salt.client.LocalClient() - osds = local_cli.cmd('I@roles:storage', 'osd.list', tgt_type='compound') + for osd_id in osds: + cmds = ['ceph osd crush remove osd.{}'.format(osd_id), + 'ceph auth del osd.{}'.format(osd_id), + 'ceph osd rm {}'.format(osd_id)] - host = '' - for _osd in osds: - if '{}'.format(id_) in osds[_osd]: - host = _osd - break - else: - log.error('No OSD with ID {} found...giving up'.format(id_)) - return False + print("Removing osd {} from Ceph".format(osd_id)) + for cmd in cmds: + local.cmd(master_minion, 'cmd.run', [cmd], tgt_type='compound') - master_minion = list(local_cli.cmd('I@roles:master', 'pillar.get', - ['master_minion'], - tgt_type='compound').items())[0][1] - - if drain: - log.info('Draining OSD {} now'.format(id_)) - ret = local_cli.cmd(host, 'osd.zero_weight', [id_]) - - log.info('Setting OSD {} out'.format(id_)) - - ret = local_cli.cmd(master_minion, 'cmd.run', - ['ceph osd out {}'.format(id_)]) - - log.info('Stopping and wiping OSD {} now'.format(id_)) - - ret = local_cli.cmd(host, 'osd.remove', [id_]) - log.info(ret) - - ret = local_cli.cmd(master_minion, 'cmd.run', - ['ceph osd crush remove osd.{}'.format(id_)]) - log.info(ret) - ret = local_cli.cmd(master_minion, 'cmd.run', - ['ceph auth del osd.{}'.format(id_)]) - log.info(ret) - ret = local_cli.cmd(master_minion, 'cmd.run', - ['ceph osd rm {}'.format(id_)]) - log.info(ret) - - return True + return "" __func_alias__ = { 'help_': 'help', diff --git a/srv/modules/runners/replace.py b/srv/modules/runners/replace.py new file mode 100644 index 000000000..4f68d15a3 --- /dev/null +++ b/srv/modules/runners/replace.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-few-public-methods,modernize-parse-error +""" +Runner to remove a single osd +""" + +from __future__ import absolute_import +from __future__ import print_function +import time +import logging +import os +# pylint: disable=import-error,3rd-party-module-not-gated,redefined-builtin +import salt.client +import salt.runner + +log = logging.getLogger(__name__) + + +def help_(): + """ + Usage + """ + usage = ('salt-run replace.osd id [id ...][force=True][timeout=value][delay=value]:\n\n' + ' Removes an OSD from a minion\n' + '\n\n') + print(usage) + return "" + + +def osd(*args, **kwargs): + """ + Remove an OSD gracefully or forcefully on the minion + + Note: If I were not in Salt, this whole routine would become a library. + This runner can be called by remove.osd and is only different by three + commands. However, we have one runner call and multiple module calls below. + Trying to refactor these module calls into a __utils__ would require yet + another module. Debugging all of this at runtime would be add to the fun, + so I didn't. 
+ """ + # Parameters for osd.remove module + supported = ['force', 'timeout', 'delay'] + passed = ["{}={}".format(k, v) for k, v in kwargs.items() if k in supported] + log.debug("Converted kwargs: {}".format(passed)) + + # OSDs to remove + osds = list(str(arg) for arg in args) + if _checks_failed(osds, kwargs): + return "" + + master_minion = _master_minion() + + local = salt.client.LocalClient() + host_osds = local.cmd('I@roles:storage', 'osd.list', tgt_type='compound') + + completed = osds + for osd_id in osds: + host = _find_host(osd_id, host_osds) + if host: + msg = _remove_osd(local, master_minion, osd_id, passed, host) + if msg: + print("{}\nFailed to remove osd {}".format(msg, osd_id)) + completed.remove(osd_id) + continue + + # Rename minion profile + minion_profile(host) + + if 'called' in kwargs and kwargs['called']: + # Return for remove.osd + return {'master_minion': master_minion, 'osds': completed} + return "" + + +def _checks_failed(osds, kwargs): + """ + Check the safety, argument length. Pause when multiple arguments are + passed to allow the admin to abort incorrect shell expansions + """ + # Checks + if not __salt__['disengage.check'](): + log.error('Safety engaged...run "salt-run disengage.safety"') + return True + + if len(osds) < 1: + help_() + return True + + if len(osds) > 1: + # Pause for a moment, let the admin see what they passed + print("Removing osds {} from minions\nPress Ctrl-C to abort".format(", ".join(osds))) + pause = 5 + if 'pause' in kwargs and kwargs['pause']: + pause = kwargs['pause'] + time.sleep(pause) + + return False + + +def _remove_osd(local, master_minion, osd_id, passed, host): + """ + Set OSD to out, remove OSD from minion + """ + local.cmd(master_minion, 'cmd.run', + ['ceph osd out {}'.format(osd_id)], + tgt_type='compound') + + print("Removing osd {} from minion {}".format(osd_id, host)) + msg = local.cmd(host, 'osd.remove', [osd_id] + passed)[host] + while msg.startswith("Timeout"): + print(" {}\nRetrying...".format(msg)) + msg = local.cmd(host, 'osd.remove', [osd_id] + passed)[host] + return msg + + +def _master_minion(): + """ + Load the master modules + """ + __master_opts__ = salt.config.client_config('/etc/salt/master') + __master_utils__ = salt.loader.utils(__master_opts__) + __salt_master__ = salt.loader.minion_mods(__master_opts__, + utils=__master_utils__) + + return __salt_master__['master.minion']() + + +def _find_host(osd_id, host_osds): + """ + Search lists for ID, return host + """ + for host in host_osds: + if str(osd_id) in host_osds[host]: + return host + return "" + + +def minion_profile(minion): + """ + Rename a minion profile to indicate that the minion profile needs to be + recreated. + + Note: Nobody is required to have profile entries in the policy.cfg. Some + might be modifying their pillar data directly. Also, the file will + not exist when called for multiple replacements. Lastly, minions may + belong to more than one hardware profile. Each must be renamed. 
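      For example (hypothetical minion name), a matching profile file such as
      .../stack/default/ceph/minions/data1.ceph.yml is renamed to the same
      path with a '-replace' suffix to signal that it must be recreated.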
+ """ + files = __salt__['push.organize']() + + yaml_file = 'stack/default/ceph/minions/{}.yml'.format(minion) + if yaml_file in files: + for filename in files[yaml_file]: + if os.path.exists(filename): + print("Renaming minion {} profile".format(minion)) + os.rename(filename, "{}-replace".format(filename)) + return "" + + +__func_alias__ = { + 'help_': 'help', + } diff --git a/srv/modules/runners/select.py b/srv/modules/runners/select.py index d82af289e..d5e4808fe 100644 --- a/srv/modules/runners/select.py +++ b/srv/modules/runners/select.py @@ -51,7 +51,7 @@ def _grain_host(client, minion): """ Return the host grain for a given minion, for use a short hostname """ - return list(client.cmd(minion, 'grains.item', ['host']).values())[0]['host'] + return list(client.cmd(minion, 'grains.item', ['nodename']).values())[0]['nodename'] def minions(host=False, format='{}', **kwargs): @@ -96,6 +96,17 @@ def one_minion(**kwargs): return ret[0] +def first(**kwargs): + """ + Some steps only need to be run once, but on any minion in a specific + search. Return the first matching key. + """ + ret = sorted(minions(**kwargs)) + if ret: + return ret[0] + return "" + + def public_addresses(tuples=False, host=False, **kwargs): """ Returns an array of public addresses matching the search critieria. diff --git a/srv/modules/runners/smoketests.py b/srv/modules/runners/smoketests.py new file mode 100644 index 000000000..8fee83f02 --- /dev/null +++ b/srv/modules/runners/smoketests.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +""" +Smoketest specific runner +""" + +from __future__ import absolute_import +from __future__ import print_function +import logging +import salt.client + +log = logging.getLogger(__name__) + + +class SmoketestPillar(object): + """ + Generates a pillar structure for overriding a storage configuration + """ + + def __init__(self, devices): + """ + Initialize base pillar structure + """ + self.base = {'ceph': {'storage': {'osds': {}}}} + self.devices = devices + + def create(self, configuration): + """ + Map functions + """ + funcs = {'filestore': self.filestore, + 'filestore2': self.filestore2, + 'bluestore': self.bluestore, + 'bluestore2': self.bluestore2, + 'bluestore3': self.bluestore3, + 'bluestored': self.bluestored} + + return funcs[configuration]() + + def checklist(self, configuration): + """ + Return a list of the OSD devices + + Note: a bit long for a list comprehension + """ + devices = [] + result = self.create(configuration) + for device in result['ceph']['storage']['osds'].keys(): + if result['ceph']['storage']['osds'][device]['format'] != 'none': + devices.append(device) + return devices + + def filestore(self): + """ + Return the first two devices set to a filestore format + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'filestore'} + for device in self.devices[2:]: + osds[device] = {'format': 'none'} + + self.base['ceph']['storage']['osds'] = osds + return self.base + + def filestore2(self): + """ + Return the first two devices set to filestore with a separate + journal on the third device + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'filestore', + 'journal': self.devices[2:3][0], + 'journal_size': '100M'} + for device in self.devices[2:]: + osds[device] = {'format': 'none'} + self.base['ceph']['storage']['osds'] = osds + return self.base + + def bluestore(self): + """ + Return the first two devices set to bluestore + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'bluestore'} + 
for device in self.devices[2:]: + osds[device] = {'format': 'none'} + + self.base['ceph']['storage']['osds'] = osds + return self.base + + def bluestore2(self): + """ + Return the first two devices set to bluestore with a separate + wal and db on the third device + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'bluestore', + 'db': self.devices[2:3][0], + 'db_size': '100M', + 'wal': self.devices[2:3][0], + 'wal_size': '100M'} + for device in self.devices[2:]: + osds[device] = {'format': 'none'} + + self.base['ceph']['storage']['osds'] = osds + return self.base + + def bluestore3(self): + """ + Return the first two devices set to bluestore, the db on the third + and the wal on the fourth. + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'bluestore', + 'db': self.devices[2:3][0], + 'db_size': '100M', + 'wal': self.devices[3:4][0], + 'wal_size': '100M'} + + for device in self.devices[2:]: + osds[device] = {'format': 'none'} + + self.base['ceph']['storage']['osds'] = osds + return self.base + + def bluestored(self): + """ + Return the first two devices set to encrypted bluestore + """ + osds = {} + for device in self.devices[:2]: + osds[device] = {'format': 'bluestore', + 'encryption': 'dmcrypt'} + for device in self.devices[2:]: + osds[device] = {'format': 'none'} + + self.base['ceph']['storage']['osds'] = osds + return self.base + + +def pillar(minion, configuration): + """ + Generate a pillar configuration to overwrite the existing pillar. Removing + keys from the pillar is cumbersome. Rely on overriding unnecessary disks + with a format of 'none'. + """ + local = salt.client.LocalClient() + devices = local.cmd(minion, 'cephdisks.filter', []) + stpl = SmoketestPillar(devices[minion]) + return stpl.create(configuration) + + +def checklist(minion, configuration): + """ + Save a checklist of devices on the minion + """ + local = salt.client.LocalClient() + devices = local.cmd(minion, 'cephdisks.filter', []) + stpl = SmoketestPillar(devices[minion]) + contents = stpl.checklist(configuration) + local.cmd(minion, 'file.write', ['/tmp/checklist', contents]) + return "" + + +def help_(): + """ + Usage + """ + usage = ('salt-run :\n\n' + ' \n' + '\n\n' + 'salt-run :\n\n' + ' \n' + '\n\n') + print(usage) + return "" + + +__func_alias__ = { + 'help_': 'help', + } diff --git a/srv/salt/_modules/cephdisks.py b/srv/salt/_modules/cephdisks.py index 90e2c9daf..9e066933f 100644 --- a/srv/salt/_modules/cephdisks.py +++ b/srv/salt/_modules/cephdisks.py @@ -512,6 +512,16 @@ def list_(**kwargs): return hwd.assemble_device_list() +def filter_(key="Device File", **kwargs): + """ + Return list of specified key + """ + hwd = HardwareDetections(**kwargs) + result = hwd.assemble_device_list() + results = [device[key] for device in result] + return sorted(results) + + def version(): """ Displays version @@ -521,4 +531,5 @@ def version(): __func_alias__ = { 'list_': 'list', + 'filter_': 'filter', } diff --git a/srv/salt/_modules/osd.py b/srv/salt/_modules/osd.py index 6e0b4e945..816125add 100644 --- a/srv/salt/_modules/osd.py +++ b/srv/salt/_modules/osd.py @@ -231,7 +231,7 @@ def __init__(self, _id, **kwargs): self.osd_id = _id self.settings = { 'conf': "/etc/ceph/ceph.conf", - 'filename': '/var/run/ceph/osd.{}-weight'.format(id), + 'filename': '/var/run/ceph/osd.{}-weight'.format(_id), 'timeout': 60, 'keyring': '/etc/ceph/ceph.client.admin.keyring', 'client': 'client.admin', @@ -339,8 +339,9 @@ def wait(self): i += 1 time.sleep(self.settings['delay']) - 
log.debug("Timeout expired") - raise RuntimeError("Timeout expired") + msg = "Timeout expired - OSD {} has {} PGs remaining".format(self.osd_id, last_pgs) + log.error(msg) + return msg class CephPGs(object): @@ -586,11 +587,10 @@ def set_bytes(self): for disk in disks[__grains__['id']]: if disk['Device File'] == self.device: return int(disk['Bytes']) - return None - else: - error = "Mine on {} for cephdisks.list".format(__grains__['id']) - log.error(error) - raise RuntimeError(error) + + error = "Missing device {} in the Salt mine for cephdisks.list".format(self.device) + log.error(error) + raise RuntimeError(error) def set_capacity(self): """ @@ -797,6 +797,12 @@ def clean(self): Note: expected to only run inside of "not is_prepared" """ + if (self.osd.disk_format != 'filestore' and + self.osd.disk_format != 'bluestore'): + log.warning(("Skipping clean of device {} with format " + "{}").format(self.osd.device, self.osd.disk_format)) + return + pathnames = _find_paths(self.osd.device) if pathnames: cmd = "sgdisk -Z --clear -g {}".format(self.osd.device) @@ -1221,7 +1227,7 @@ def _bluestore_args(self): args += "{}".format(self.osd.device) return args - def prepare(self): + def prepare(self, osd_id=None): """ Generate the correct prepare command. @@ -1244,6 +1250,9 @@ def prepare(self): if self.osd.device: cmd = "PYTHONWARNINGS=ignore ceph-disk -v prepare " + # specify OSD ID when replacing + if osd_id: + cmd += "--osd-id {} ".format(osd_id) # Dmcrypt if self.osd.encryption == 'dmcrypt': cmd += "--dmcrypt " @@ -1418,22 +1427,26 @@ class OSDRemove(object): """ # pylint: disable=unused-argument - def __init__(self, osd_id, device, weight, grains, force=False, **kwargs): + def __init__(self, osd_id, device, weight, grains, force=False, human=True, **kwargs): """ Initialize settings """ self.osd_id = osd_id self.osd_fsid = device.osd_fsid - self.partitions = self.set_partitions(device) + self.device = device + self.partitions = self.set_partitions() self._weight = weight self._grains = grains self.force = force + self.human = human + self.keyring = kwargs.get('keyring', None) + self.client = kwargs.get('client', None) - def set_partitions(self, device): + def set_partitions(self): """ Return queried partitions or fallback to grains """ - _partitions = device.partitions(self.osd_id) + _partitions = self.device.partitions(self.osd_id) if not _partitions: log.debug("grains: \n{}".format(pprint.pformat(__grains__['ceph']))) if str(self.osd_id) in __grains__['ceph']: @@ -1444,6 +1457,7 @@ def set_partitions(self, device): log.debug("partitions: \n{}".format(pprint.pformat(_partitions))) return _partitions + # pylint: disable=too-many-return-statements def remove(self): """ Wrapper for removing an OSD @@ -1455,22 +1469,44 @@ def remove(self): if self.force: log.warning("Forcing OSD removal") - else: - self.empty() - # Terminate - self.terminate() + # Terminate + self.terminate() - # Unmount filesystems - result = self.unmount() - if result: - return result + # Best effort depending on the reason for the forced removal + self.mark_destroyed() + update_destroyed(self._osd_disk(), self.osd_id) + else: + for func in [self.empty, self.terminate]: + msg = func() + if self.human and msg: + log.error(msg) + return msg + + # Inform Ceph + # + # Consider this a hard requirement for graceful removal. If the + # OSD cannot be marked and recorded, stop the process. 
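            # (Sketch of the intent, not extra behaviour: mark_destroyed()
            # issues "ceph osd destroy <id>", which keeps the ID allocated,
            # and update_destroyed() records the disk/ID pair so a later
            # deploy can hand the same ID to "ceph-disk prepare --osd-id".)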
+ if self.mark_destroyed(): + msg = update_destroyed(self._osd_disk(), self.osd_id) + if msg: + log.error(msg) + return msg + log.info("OSD {} marked and recorded".format(self.osd_id)) + else: + msg = "Failed to mark OSD {} as destroyed".format(self.osd_id) + log.error(msg) + return msg - # Wipe partitions - self.wipe() + for func in [self.unmount, self.wipe, self.destroy]: + msg = func() + if msg: + log.error(msg) + return msg + + # Remove grain + self._grains.delete(self.osd_id) - # Destroy partitions - self.destroy() return "" def empty(self): @@ -1483,25 +1519,27 @@ def empty(self): msg = "Reweight failed" log.error(msg) return msg - self._weight.wait() - return "" + return self._weight.wait() def terminate(self): """ Stop the ceph-osd without error """ - # Check weight is zero cmd = "systemctl disable ceph-osd@{}".format(self.osd_id) __salt__['helper.run'](cmd) - # How long with this hang on a broken OSD + # How long will this hang on a broken OSD cmd = "systemctl stop ceph-osd@{}".format(self.osd_id) __salt__['helper.run'](cmd) - cmd = r"pkill -f ceph-osd.*{}\ --".format(self.osd_id) + cmd = r"pkill -f ceph-osd.*id\ {}\ --".format(self.osd_id) __salt__['helper.run'](cmd) time.sleep(1) - cmd = r"pkill -9 -f ceph-osd.*{}\ --".format(self.osd_id) + cmd = r"pkill -9 -f ceph-osd.*id\ {}\ --".format(self.osd_id) __salt__['helper.run'](cmd) time.sleep(1) + cmd = r"pgrep -f ceph-osd.*id\ {}\ --".format(self.osd_id) + _rc, _stdout, _stderr = __salt__['helper.run'](cmd) + if _rc == 0: + return "Failed to terminate OSD {} - pid {}".format(self.osd_id, _stdout) return "" def unmount(self): @@ -1549,12 +1587,13 @@ def wipe(self): if self.partitions: for _, _partition in six.iteritems(self.partitions): if os.path.exists(_partition): - cmd = "dd if=/dev/zero of={} bs=4096 count=1 oflag=direct".format(_partition) - __salt__['helper.run'](cmd) + cmd = "dd if=/dev/zero of={} bs=4M count=1 oflag=direct".format(_partition) + _rc, _stdout, _stderr = __salt__['helper.run'](cmd) + if _rc != 0: + return "Failed to wipe partition {}".format(_partition) else: msg = "Nothing to wipe - no partitions available" - log.error(msg) - return msg + log.warning(msg) return "" def destroy(self): @@ -1563,9 +1602,15 @@ def destroy(self): """ # pylint: disable=attribute-defined-outside-init self.osd_disk = self._osd_disk() - self._delete_partitions() + msg = self._delete_partitions() + if msg: + return msg self._wipe_gpt_backups() - self._delete_osd() + + msg = self._delete_osd() + if msg: + return msg + self._settle() return "" @@ -1607,9 +1652,12 @@ def _delete_partitions(self): if disk: log.debug("disk: {} partition: {}".format(disk, _partition)) cmd = "sgdisk -d {} {}".format(_partition, disk) - __salt__['helper.run'](cmd) + _rc, _stdout, _stderr = __salt__['helper.run'](cmd) + if _rc != 0: + return "Failed to delete partition {} on {}".format(_partition, disk) else: log.error("Partition {} does not exist".format(short_name)) + return "" def _wipe_gpt_backups(self): """ @@ -1623,8 +1671,7 @@ def _wipe_gpt_backups(self): cmd = ("dd if=/dev/zero of={} bs=4096 count=33 seek={} " "oflag=direct".format(self.osd_disk, seek_position)) __salt__['helper.run'](cmd) - return "" - return None + return "" def _delete_osd(self): """ @@ -1634,7 +1681,10 @@ def _delete_osd(self): cmd = "sgdisk -Z --clear -g {}".format(self.osd_disk) _rc, _stdout, _stderr = __salt__['helper.run'](cmd) if _rc != 0: - raise RuntimeError("{} failed".format(cmd)) + msg = "Failed to delete OSD {}".format(self.osd_disk) + log.error(msg) + return msg + 
return "" # pylint: disable=no-self-use def _settle(self): @@ -1646,6 +1696,17 @@ def _settle(self): 'udevadm settle --timeout=20']: __salt__['helper.run'](cmd) + def mark_destroyed(self): + """ + Mark the ID as destroyed in Ceph + """ + auth = "" + if self.keyring and self.client: + auth = "--keyring={} --name={}".format(self.keyring, self.client) + cmd = "ceph {} osd destroy {} --yes-i-really-mean-it".format(auth, self.osd_id) + _rc, _stdout, _stderr = __salt__['helper.run'](cmd) + return _rc == 0 + def remove(osd_id, **kwargs): """ @@ -1660,7 +1721,7 @@ def remove(osd_id, **kwargs): osdd = OSDDevices() osdg = OSDGrains(osdd) - osdr = OSDRemove(osd_id, osdd, osdw, osdg, **kwargs) + osdr = OSDRemove(osd_id, osdd, osdw, osdg, **settings) return osdr.remove() @@ -1805,6 +1866,162 @@ def _prefer_underscores(self, devicenames): return index +class OSDDestroyed(object): + """ + Maintain a key value store for destroyed OSDs. + + The workflow can get complicated. The first case is the normal case. The + use cases are + + 1) Device has a by-path equivalent. Save the by-path name and ID + 2) Device has no by-path equivalent. Save the current device name as an + indication to future runs that this device can safely be skipped. On + the first attempt, return as failed with instructions. + 3) Admin is saving actual device name of new device. + """ + + def __init__(self): + """ + Set the default filename. + """ + self.filename = "/etc/ceph/destroyedOSDs.yml" + + # Keep yaml human readable/editable + self.friendly_dumper = yaml.SafeDumper + self.friendly_dumper.ignore_aliases = lambda self, data: True + + def update(self, device, osd_id, force=False): + """ + Add the by-path version of device and osd_id. If by-path does not + exist, record current device and issue exception with instructions. + If forced, record current device. + """ + content = {} + if os.path.exists(self.filename): + with open(self.filename, 'r') as destroyed: + content = yaml.safe_load(destroyed) + log.debug("content: {} {}".format(type(content), content)) + + if device in content: + # Exit early, no by-path equivalent from previous run + return "" + + by_path = self._by_path(device) + if by_path and not force: + content[by_path] = osd_id + else: + # by-path device is missing, save current device to allow + # the OSD to be removed and rely on admin following instructions + # below OR admin is overriding the save manually with the new + # device name. In either case, save the device name with the ID. 
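            # After this write, the YAML file simply maps a device name to an
            # OSD ID, e.g. (hypothetical entries):
            #   /dev/disk/by-path/pci-0000:00:17.0-ata-2: 3
            #   /dev/sdx: 5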
+ content[device] = osd_id + + with open(self.filename, 'w') as destroyed: + destroyed.write(yaml.dump(content, Dumper=self.friendly_dumper, + default_flow_style=False)) + + if by_path or force: + return "" + + # Hard enough to read without the else indent + example = '/dev/disk/by-id/new_device_name' + msg = ("Device {} is missing a /dev/disk/by-path symlink.\n" + "Device cannot be replaced automatically.\n\n" + "Replace the device, find the new device name and run\n\n" + "salt {} osd.update_destroyed {} {}" + ).format(device, __grains__['id'], example, osd_id) + log.error(msg) + return msg + + def get(self, device): + """ + Return ID + """ + if os.path.exists(self.filename): + with open(self.filename, 'r') as destroyed: + content = yaml.safe_load(destroyed) + by_path = self._by_path(device) + if by_path and by_path in content: + return content[by_path] + if device in content: + return content[device] + + return "" + + # pylint: disable=no-self-use + def _by_path(self, device): + """ + Return the equivalent by-path device name + """ + cmd = (r"find -L /dev/disk/by-path -samefile {}".format(device)) + _rc, _stdout, _stderr = __salt__['helper.run'](cmd) + if _stdout: + _devices = _stdout.split() + if _devices: + return _devices[0] + return "" + + def remove(self, device): + """ + Remove entry + """ + if os.path.exists(self.filename): + with open(self.filename, 'r') as destroyed: + content = yaml.safe_load(destroyed) + by_path = self._by_path(device) + if by_path and by_path in content: + del content[by_path] + # Normally absent + if device in content: + del content[device] + with open(self.filename, 'w') as destroyed: + destroyed.write(yaml.dump(content, Dumper=self.friendly_dumper, + default_flow_style=False)) + + def dump(self): + """ + Display all devices, IDs + """ + if os.path.exists(self.filename): + with open(self.filename, 'r') as destroyed: + content = yaml.safe_load(destroyed) + return content + return "" + + +def update_destroyed(device, osd_id): + """ + Save the ID + """ + osd_d = OSDDestroyed() + return osd_d.update(device, osd_id) + + +def find_destroyed(device): + """ + Return the ID for a device + """ + osd_d = OSDDestroyed() + return osd_d.get(str(device)) + + +def remove_destroyed(device): + """ + Remove the device + """ + osd_d = OSDDestroyed() + osd_d.remove(device) + return "" + + +def dump_destroyed(): + """ + Display all devices, IDs + """ + osd_d = OSDDestroyed() + return osd_d.dump() + + # pylint: disable=too-few-public-methods class OSDGrains(object): """ @@ -1816,11 +2033,12 @@ class OSDGrains(object): remove this entry. 
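    A minimal sketch of the resulting /etc/salt/grains entry (the ID, key
    names and device path are illustrative; only the 'ceph'/'partitions'
    layout referenced elsewhere in this module is assumed):

        ceph:
          '0':
            partitions:
              osd: /dev/disk/by-id/ata-DISK0-part1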
""" - def __init__(self, device, pathname="/var/lib/ceph/osd"): + def __init__(self, device, pathname="/var/lib/ceph/osd", filename="/etc/salt/grains"): """ Initialize settings """ self.pathname = pathname + self.filename = filename self.partitions = device.partitions self.osd_fsid = device.osd_fsid @@ -1838,32 +2056,48 @@ def retain(self): log.debug("osd {}: {}".format(osd_id, pprint.pformat(storage[osd_id]))) self._grains(storage) - def _grains(self, storage, filename="/etc/salt/grains"): + def delete(self, osd_id): + """ + Delete an OSD entry + """ + content = {} + if os.path.exists(self.filename): + with open(self.filename, 'r') as minion_grains: + content = yaml.safe_load(minion_grains) + # pylint: disable=bare-except + try: + del content['ceph'][str(osd_id)] + except: + log.error("Cannot delete osd {} from grains".format(osd_id)) + if content: + self._update_grains(content) + + def _grains(self, storage): """ Load and save grains when changed """ content = {} - if os.path.exists(filename): - with open(filename, 'r') as minion_grains: + if os.path.exists(self.filename): + with open(self.filename, 'r') as minion_grains: content = yaml.safe_load(minion_grains) if 'ceph' in content and content['ceph'] == storage: - log.debug("No update for {}".format(filename)) + log.debug("No update for {}".format(self.filename)) else: content['ceph'] = storage self._update_grains(content) # pylint: disable=no-self-use - def _update_grains(self, content, filename="/etc/salt/grains"): + def _update_grains(self, content): """ Update the yaml file without destroying other content """ - log.info("Updating {}".format(filename)) + log.info("Updating {}".format(self.filename)) # Keep yaml human readable/editable friendly_dumper = yaml.SafeDumper friendly_dumper.ignore_aliases = lambda self, data: True - with open(filename, 'w') as minion_grains: + with open(self.filename, 'w') as minion_grains: minion_grains.write(yaml.dump(content, Dumper=friendly_dumper, default_flow_style=False)) @@ -1912,8 +2146,12 @@ def deploy(): osdp.clean() osdp.partition() osdc = OSDCommands(config) - __salt__['helper.run'](osdc.prepare()) + previous_id = find_destroyed(device) + __salt__['helper.run'](osdc.prepare(previous_id)) __salt__['helper.run'](osdc.activate()) + remove_destroyed(device) + if previous_id: + restore_weight(previous_id) def redeploy(simultaneous=False, **kwargs): @@ -1934,10 +2172,6 @@ def redeploy(simultaneous=False, **kwargs): settings = _settings(**kwargs) for _id in __grains__['ceph']: _part = _partition(_id) - # if 'lockbox' in __grains__['ceph'][_id]['partitions']: - # partition = __grains__['ceph'][_id]['partitions']['lockbox'] - # else: - # partition = __grains__['ceph'][_id]['partitions']['osd'] log.info("Partition: {}".format(_part)) disk, _ = split_partition(_part) log.info("ID: {}".format(_id)) @@ -1950,9 +2184,10 @@ def redeploy(simultaneous=False, **kwargs): osdp = OSDPartitions(config) osdp.partition() osdc = OSDCommands(config) - __salt__['helper.run'](osdc.prepare()) + __salt__['helper.run'](osdc.prepare(_id)) + restore_weight(_id) __salt__['helper.run'](osdc.activate()) - # not is_prepared(disk)): + remove_destroyed(disk) def _partition(osd_id): @@ -2018,7 +2253,7 @@ def is_activated(device): return "/bin/false" -def prepare(device): +def prepare(device, osd_id=None): """ Return ceph-disk command to prepare OSD. 
@@ -2028,7 +2263,7 @@ def prepare(device): """ config = OSDConfig(device) osdc = OSDCommands(config) - return osdc.prepare() + return osdc.prepare(osd_id) def activate(device): @@ -2073,30 +2308,73 @@ def retain(): return osdg.retain() -def report(failhard=False): +def delete_grain(osd_id): + """ + Delete an individual OSD grain + """ + osdd = OSDDevices() + osdg = OSDGrains(osdd) + return osdg.delete(osd_id) + + +def report(human=True): """ Display the difference between the pillar and grains for the OSDs + """ + active, unmounted = _report_grains() + un1, ch1 = _report_pillar(active) + un2, ch2 = _report_original_pillar(active) + + unconfigured = un1 + un2 + changed = ch1 + ch2 + + if human: + if unconfigured or changed or unmounted: + msg = "" + if unconfigured: + msg += "No OSD configured for \n{}\n".format("\n".join(unconfigured)) + if changed: + msg += "Different configuration for \n{}\n".format("\n".join(changed)) + if unmounted: + msg += "No OSD mounted for \n{}\n".format("\n".join(unmounted)) + return msg + else: + return "All configured OSDs are active" + else: + return {'unconfigured': unconfigured, + 'changed': changed, + 'unmounted': unmounted} + - Note: this needs more bullet proofing +def _report_grains(): + """ + Return the active and unmounted lists """ - if 'ceph' not in __grains__: - return "No ceph grain available. Run osd.retain" active = [] unmounted = [] - for _id in __grains__['ceph']: - _partition = readlink(__grains__['ceph'][_id]['partitions']['osd']) - disk, _ = split_partition(_partition) - active.append(disk) - log.debug("checking /var/lib/ceph/osd/ceph-{}/fsid".format(_id)) - if not os.path.exists("/var/lib/ceph/osd/ceph-{}/fsid".format(_id)): - unmounted.append(disk) - if 'lockbox' in __grains__['ceph'][_id]['partitions']: - _partition = readlink(__grains__['ceph'][_id]['partitions']['lockbox']) + if 'ceph' in __grains__: + for _id in __grains__['ceph']: + _partition = readlink(__grains__['ceph'][_id]['partitions']['osd']) disk, _ = split_partition(_partition) active.append(disk) + log.debug("checking /var/lib/ceph/osd/ceph-{}/fsid".format(_id)) + if not os.path.exists("/var/lib/ceph/osd/ceph-{}/fsid".format(_id)): + unmounted.append(disk) + if 'lockbox' in __grains__['ceph'][_id]['partitions']: + _partition = readlink(__grains__['ceph'][_id]['partitions']['lockbox']) + disk, _ = split_partition(_partition) + active.append(disk) + return active, unmounted + +def _report_pillar(active): + """ + Return the unconfigured and changed lists + """ log.debug("active: {}".format(active)) + unconfigured = [] + changed = [] if 'ceph' in __pillar__: unconfigured = list(__pillar__['ceph']['storage']['osds'].keys()) changed = list(unconfigured) @@ -2111,7 +2389,16 @@ def report(failhard=False): changed.remove(osd) log.debug("changed: {}".format(active)) + return unconfigured, changed + +def _report_original_pillar(active): + """ + Return the unconfigured and changed lists from the original pillar + structure + """ + unconfigured = [] + changed = [] if 'storage' in __pillar__: unconfigured = __pillar__['storage']['osds'] for _dj in __pillar__['storage']['data+journals']: @@ -2128,21 +2415,7 @@ def report(failhard=False): else: log.debug("Removed from changed {}".format(osd)) changed.remove(osd) - - if unconfigured or changed or unmounted: - msg = "" - if unconfigured: - msg += "No OSD configured for \n{}\n".format("\n".join(unconfigured)) - if changed: - msg += "Different configuration for \n{}\n".format("\n".join(changed)) - if unmounted: - msg += "No OSD mounted for 
\n{}\n".format("\n".join(unmounted)) - if failhard: - raise RuntimeError(msg) - else: - return msg - else: - return "All configured OSDs are active" + return unconfigured, changed __func_alias__ = { diff --git a/srv/salt/_states/osd.py b/srv/salt/_states/osd.py new file mode 100644 index 000000000..1200d01b8 --- /dev/null +++ b/srv/salt/_states/osd.py @@ -0,0 +1,26 @@ + + +def correct(name, device): + """ + """ + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': ''} + + if __opts__['test'] == True: + return ret + + if isinstance(device, list): + for dev in device: + result = __salt__['osd.is_incorrect'](dev) + if result: + break + else: + result = __salt__['osd.is_incorrect'](device) + + if result: + ret['result'] = False + else: + ret['result'] = True + return ret diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore2.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore2.sls new file mode 100644 index 000000000..385bb6391 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "btob2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore3.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore3.sls new file mode 100644 index 000000000..e56cfcfaa --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestore3.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "btob3" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save reset checklist {{ label 
}}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/bluestored.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestored.sls new file mode 100644 index 000000000..267f1b270 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/bluestored.sls @@ -0,0 +1,54 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "btobd" %} + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestored' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/filestore.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/filestore.sls new file mode 100644 index 000000000..415865535 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/filestore.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "btof" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check2 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + 
+Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/filestore2.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/filestore2.sls new file mode 100644 index 000000000..b2a22a495 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/filestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "btof2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore/init.sls b/srv/salt/ceph/functests/1node/migrate/bluestore/init.sls new file mode 100644 index 000000000..dff5bb7ef --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore/init.sls @@ -0,0 +1,6 @@ + +include: + - .bluestore2 + - .bluestore3 + - .filestore + - .filestore2 diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore.sls new file mode 100644 index 000000000..29938c6a1 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b2tob" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ 
salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore3.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore3.sls new file mode 100644 index 000000000..ee275ef8c --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestore3.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b2tob3" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestored.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestored.sls new file mode 100644 index 000000000..4e6debe2c --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/bluestored.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b2tobd" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed 
{{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestored' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore.sls new file mode 100644 index 000000000..71161cbff --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b2tof" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore2.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore2.sls new file mode 100644 index 000000000..5ddcfb93e --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/filestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b2tof2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: 
ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore2/init.sls b/srv/salt/ceph/functests/1node/migrate/bluestore2/init.sls new file mode 100644 index 000000000..724c34244 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore2/init.sls @@ -0,0 +1,6 @@ + +include: + - .bluestore + - .bluestore3 + - .filestore + - .filestore2 diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore.sls b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore.sls new file mode 100644 index 000000000..bd737934e --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b3tob" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore2.sls b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore2.sls new file mode 100644 index 000000000..9a1e56e4d --- /dev/null 
+++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b3tob2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestored.sls b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestored.sls new file mode 100644 index 000000000..cb56bca96 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/bluestored.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b3tobd" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestored' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore.sls b/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore.sls new file mode 100644 
index 000000000..6614c19e4 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b3tof" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore2.sls b/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore2.sls new file mode 100644 index 000000000..d2749565e --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/filestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "b3tof2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/bluestore3/init.sls 
b/srv/salt/ceph/functests/1node/migrate/bluestore3/init.sls new file mode 100644 index 000000000..db2c37db0 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/bluestore3/init.sls @@ -0,0 +1,6 @@ + +include: + - .bluestore2 + - .bluestore + - .filestore + - .filestore2 diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/bluestore.sls b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore.sls new file mode 100644 index 000000000..978abcff3 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "ftob" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check2 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/bluestore2.sls b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore2.sls new file mode 100644 index 000000000..40e3d29a5 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "ftob2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save checklist {{ 
label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/bluestore3.sls b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore3.sls new file mode 100644 index 000000000..e75397ae7 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/bluestore3.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "ftob3" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/bluestored.sls b/srv/salt/ceph/functests/1node/migrate/filestore/bluestored.sls new file mode 100644 index 000000000..558b88f86 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/bluestored.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "ftobd" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, 
configuration='bluestored') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestored' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/filestore2.sls b/srv/salt/ceph/functests/1node/migrate/filestore/filestore2.sls new file mode 100644 index 000000000..3593c9b2d --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/filestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "ftof2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore/init.sls b/srv/salt/ceph/functests/1node/migrate/filestore/init.sls new file mode 100644 index 000000000..a8411a170 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore/init.sls @@ -0,0 +1,6 @@ + +include: + - .bluestore + - .bluestore2 + - .bluestore3 + - .filestore2 diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore.sls new file mode 100644 index 000000000..5dcc9fcea --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "f2tob" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + 
+Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore2.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore2.sls new file mode 100644 index 000000000..69110caf4 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore2.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "f2tob2" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore2' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore2') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore3.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore3.sls new file mode 100644 index 000000000..19d258ec5 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestore3.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "f2tob3" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check4 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + 
- arg: + - {{ node }} + - 'filestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestore3' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestore3') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/bluestored.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestored.sls new file mode 100644 index 000000000..c12a36b91 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/bluestored.sls @@ -0,0 +1,60 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "f2tobd" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save reset checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'bluestored' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='bluestored') }} + + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/filestore.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/filestore.sls new file mode 100644 index 000000000..a27ae3aaf --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/filestore.sls @@ -0,0 +1,59 @@ + +{% set node = salt.saltutil.runner('select.first', roles='storage') %} +{% set label = "f2tof" %} + +Check environment {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check3 + - failhard: True + +Remove OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.remove_osds + +Remove destroyed {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.remove.destroyed + +Initialize OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.init_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + +Save reset checklist {{ label }}: + 
salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore2' + +Check reset OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore2') }} + - failhard: True + +Migrate {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.redeploy.osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + +Save checklist {{ label }}: + salt.runner: + - name: smoketests.checklist + - arg: + - {{ node }} + - 'filestore' + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ node }} + - sls: ceph.tests.migrate.check_osds + - pillar: {{ salt.saltutil.runner('smoketests.pillar', minion=node, configuration='filestore') }} + diff --git a/srv/salt/ceph/functests/1node/migrate/filestore2/init.sls b/srv/salt/ceph/functests/1node/migrate/filestore2/init.sls new file mode 100644 index 000000000..7e7629627 --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/filestore2/init.sls @@ -0,0 +1,6 @@ + +include: + - .bluestore + - .bluestore2 + - .bluestore3 + - .filestore diff --git a/srv/salt/ceph/functests/1node/migrate/init.sls b/srv/salt/ceph/functests/1node/migrate/init.sls new file mode 100644 index 000000000..15324435f --- /dev/null +++ b/srv/salt/ceph/functests/1node/migrate/init.sls @@ -0,0 +1,7 @@ + +include: + - .filestore + - .filestore2 + - .bluestore + - .bluestore2 + - .bluestore3 diff --git a/srv/salt/ceph/functests/1node/remove/delay.sls b/srv/salt/ceph/functests/1node/remove/delay.sls new file mode 100644 index 000000000..dc6d63cd5 --- /dev/null +++ b/srv/salt/ceph/functests/1node/remove/delay.sls @@ -0,0 +1,32 @@ + +{% set label = "delay" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +keyword arguments: + salt.runner: + - name: remove.osd + - arg: + - 0 + - kwarg: + delay: 1 + timeout: 1 + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.remove.check_0 + +Restore OSDs {{ label }}: + salt.state: + - tgt: I@roles:storage + - sls: ceph.tests.remove.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/functests/1node/remove/force.sls b/srv/salt/ceph/functests/1node/remove/force.sls new file mode 100644 index 000000000..a0c797933 --- /dev/null +++ b/srv/salt/ceph/functests/1node/remove/force.sls @@ -0,0 +1,31 @@ + +{% set label = "force" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +forced removal: + salt.runner: + - name: remove.osd + - arg: + - 0 + - kwarg: + force: True + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.remove.check_0 + +Restore OSDs {{ label }}: + salt.state: + - tgt: I@roles:storage + - sls: ceph.tests.remove.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/functests/1node/remove/init.sls b/srv/salt/ceph/functests/1node/remove/init.sls new file mode 100644 index 000000000..dbae73907 --- /dev/null +++ b/srv/salt/ceph/functests/1node/remove/init.sls @@ -0,0 +1,5 @@ + +include: + - .multiple + - .delay + - .force diff --git a/srv/salt/ceph/functests/1node/remove/multiple.sls b/srv/salt/ceph/functests/1node/remove/multiple.sls new file mode 100644 index 000000000..e82d77180 --- /dev/null 
+++ b/srv/salt/ceph/functests/1node/remove/multiple.sls @@ -0,0 +1,30 @@ + +{% set label = "multiple" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +Multiple arguments: + salt.runner: + - name: remove.osd + - arg: + - 0 + - 1 + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.remove.check_absent + +Restore OSDs {{ label }}: + salt.state: + - tgt: 'I@roles:storage' + - sls: ceph.tests.remove.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/functests/1node/replace/delay.sls b/srv/salt/ceph/functests/1node/replace/delay.sls new file mode 100644 index 000000000..0ce7957a5 --- /dev/null +++ b/srv/salt/ceph/functests/1node/replace/delay.sls @@ -0,0 +1,32 @@ + +{% set label = "delay" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +keyword arguments: + salt.runner: + - name: replace.osd + - arg: + - 0 + - kwarg: + delay: 1 + timeout: 1 + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.replace.check_0 + +Restore OSDs {{ label }}: + salt.state: + - tgt: I@roles:storage + - sls: ceph.tests.replace.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/functests/1node/replace/force.sls b/srv/salt/ceph/functests/1node/replace/force.sls new file mode 100644 index 000000000..106947a9a --- /dev/null +++ b/srv/salt/ceph/functests/1node/replace/force.sls @@ -0,0 +1,31 @@ + +{% set label = "force" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +forced removal: + salt.runner: + - name: replace.osd + - arg: + - 0 + - kwarg: + force: True + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.replace.check_0 + +Restore OSDs {{ label }}: + salt.state: + - tgt: I@roles:storage + - sls: ceph.tests.replace.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/functests/1node/replace/init.sls b/srv/salt/ceph/functests/1node/replace/init.sls new file mode 100644 index 000000000..dbae73907 --- /dev/null +++ b/srv/salt/ceph/functests/1node/replace/init.sls @@ -0,0 +1,5 @@ + +include: + - .multiple + - .delay + - .force diff --git a/srv/salt/ceph/functests/1node/replace/multiple.sls b/srv/salt/ceph/functests/1node/replace/multiple.sls new file mode 100644 index 000000000..acfba2891 --- /dev/null +++ b/srv/salt/ceph/functests/1node/replace/multiple.sls @@ -0,0 +1,30 @@ + +{% set label = "multiple" %} + +Disengage {{ label }}: + salt.runner: + - name: disengage.safety + +Multiple arguments: + salt.runner: + - name: replace.osd + - arg: + - 0 + - 1 + +Check OSDs {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.tests.replace.check_absent + +Restore OSDs {{ label }}: + salt.state: + - tgt: 'I@roles:storage' + - sls: ceph.tests.replace.restore_osds + - tgt_type: compound + +Wait for Ceph {{ label }}: + salt.state: + - tgt: {{ salt['master.minion']() }} + - sls: ceph.wait.until.OK + diff --git a/srv/salt/ceph/remove/migrated/default.sls b/srv/salt/ceph/remove/destroyed/default.sls similarity index 100% rename from srv/salt/ceph/remove/migrated/default.sls rename to srv/salt/ceph/remove/destroyed/default.sls diff 
--git a/srv/salt/ceph/remove/migrated/init.sls b/srv/salt/ceph/remove/destroyed/init.sls similarity index 100% rename from srv/salt/ceph/remove/migrated/init.sls rename to srv/salt/ceph/remove/destroyed/init.sls diff --git a/srv/salt/ceph/remove/migrated b/srv/salt/ceph/remove/migrated new file mode 120000 index 000000000..134679149 --- /dev/null +++ b/srv/salt/ceph/remove/migrated @@ -0,0 +1 @@ +destroyed \ No newline at end of file diff --git a/srv/salt/ceph/stage/configure/default.sls b/srv/salt/ceph/stage/configure/default.sls index ab9935603..8f524c183 100644 --- a/srv/salt/ceph/stage/configure/default.sls +++ b/srv/salt/ceph/stage/configure/default.sls @@ -47,3 +47,7 @@ install and setup node exporters: - tgt_type: compound - sls: ceph.monitoring.prometheus.exporters.node_exporter +advise OSDs: + salt.runner: + - name: advise.osds + diff --git a/srv/salt/ceph/tests/migrate/check2.sls b/srv/salt/ceph/tests/migrate/check2.sls new file mode 100644 index 000000000..632890494 --- /dev/null +++ b/srv/salt/ceph/tests/migrate/check2.sls @@ -0,0 +1,8 @@ + +{% set device_count = salt['cephdisks.filter']() | length %} +Device count {{ device_count }} at {{ salt['status.time']('%s') }}: + test.nop +{% if device_count < 2 %} +Skipping, not enough devices: + test.fail_without_changes +{% endif %} diff --git a/srv/salt/ceph/tests/migrate/check3.sls b/srv/salt/ceph/tests/migrate/check3.sls new file mode 100644 index 000000000..5a1f233eb --- /dev/null +++ b/srv/salt/ceph/tests/migrate/check3.sls @@ -0,0 +1,8 @@ + +{% set device_count = salt['cephdisks.filter']() | length %} +Device count {{ device_count }} at {{ salt['status.time']('%s') }}: + test.nop +{% if device_count < 3 %} +Skipping, not enough devices: + test.fail_without_changes +{% endif %} diff --git a/srv/salt/ceph/tests/migrate/check4.sls b/srv/salt/ceph/tests/migrate/check4.sls new file mode 100644 index 000000000..9cffef6e6 --- /dev/null +++ b/srv/salt/ceph/tests/migrate/check4.sls @@ -0,0 +1,8 @@ + +{% set device_count = salt['cephdisks.filter']() | length %} +Device count {{ device_count }} at {{ salt['status.time']('%s') }}: + test.nop +{% if device_count < 4 %} +Skipping, not enough devices: + test.fail_without_changes +{% endif %} diff --git a/srv/salt/ceph/tests/migrate/check_osds.sls b/srv/salt/ceph/tests/migrate/check_osds.sls new file mode 100644 index 000000000..9afb7af38 --- /dev/null +++ b/srv/salt/ceph/tests/migrate/check_osds.sls @@ -0,0 +1,5 @@ + +check: + osd.correct: + - device: {{ salt['file.read']('/tmp/checklist') }} + diff --git a/srv/salt/ceph/tests/migrate/init_osds.sls b/srv/salt/ceph/tests/migrate/init_osds.sls new file mode 100644 index 000000000..1a4c03ef7 --- /dev/null +++ b/srv/salt/ceph/tests/migrate/init_osds.sls @@ -0,0 +1,9 @@ + +creating OSD: + module.run: + - name: osd.deploy + +save grains: + module.run: + - name: osd.retain + diff --git a/srv/salt/ceph/tests/migrate/remove_osds.sls b/srv/salt/ceph/tests/migrate/remove_osds.sls new file mode 100644 index 000000000..4244ef060 --- /dev/null +++ b/srv/salt/ceph/tests/migrate/remove_osds.sls @@ -0,0 +1,14 @@ + +Removing OSDs: + test.nop + +{% for id in salt['osd.list']() %} + +removing {{ id }}: + module.run: + - name: osd.remove + - osd_id: {{ id }} + +{% endfor %} + + diff --git a/srv/salt/ceph/tests/migrate/reset_osds.sls b/srv/salt/ceph/tests/migrate/reset_osds.sls new file mode 100644 index 000000000..13fe90bdf --- /dev/null +++ b/srv/salt/ceph/tests/migrate/reset_osds.sls @@ -0,0 +1,18 @@ + +{% for id in salt['osd.list']() %} + +removing {{ id }}: + 
module.run: + - name: osd.remove + - osd_id: {{ id }} + +{% endfor %} + +creating OSD: + module.run: + - name: osd.deploy + +save grains: + module.run: + - name: osd.retain + diff --git a/srv/salt/ceph/tests/remove/check_0.sls b/srv/salt/ceph/tests/remove/check_0.sls new file mode 100644 index 000000000..cf3080aa5 --- /dev/null +++ b/srv/salt/ceph/tests/remove/check_0.sls @@ -0,0 +1,5 @@ + +absent OSDs: + cmd.run: + - name: /bin/false + - onlyif: ceph osd ls | egrep -q '^0$' diff --git a/srv/salt/ceph/tests/remove/check_absent.sls b/srv/salt/ceph/tests/remove/check_absent.sls new file mode 100644 index 000000000..e39b04cd3 --- /dev/null +++ b/srv/salt/ceph/tests/remove/check_absent.sls @@ -0,0 +1,5 @@ + +absent OSDs: + cmd.run: + - name: /bin/false + - onlyif: ceph osd ls | egrep -q '^0$|^1$' diff --git a/srv/salt/ceph/tests/remove/restore_osds.sls b/srv/salt/ceph/tests/remove/restore_osds.sls new file mode 100644 index 000000000..a45c61ff4 --- /dev/null +++ b/srv/salt/ceph/tests/remove/restore_osds.sls @@ -0,0 +1,8 @@ + +creating OSDs: + module.run: + - name: osd.deploy + +save grains: + module.run: + - name: osd.retain diff --git a/srv/salt/ceph/tests/replace/check_0.sls b/srv/salt/ceph/tests/replace/check_0.sls new file mode 100644 index 000000000..df03d7aa3 --- /dev/null +++ b/srv/salt/ceph/tests/replace/check_0.sls @@ -0,0 +1,5 @@ + +absent OSDs: + cmd.run: + - name: /bin/false + - unless: ceph osd ls | egrep -q '^0$' diff --git a/srv/salt/ceph/tests/replace/check_absent.sls b/srv/salt/ceph/tests/replace/check_absent.sls new file mode 100644 index 000000000..351f8541c --- /dev/null +++ b/srv/salt/ceph/tests/replace/check_absent.sls @@ -0,0 +1,5 @@ + +absent OSDs: + cmd.run: + - name: /bin/false + - unless: ceph osd ls | egrep -q '^0$|^1$' diff --git a/srv/salt/ceph/tests/replace/restore_osds.sls b/srv/salt/ceph/tests/replace/restore_osds.sls new file mode 100644 index 000000000..a45c61ff4 --- /dev/null +++ b/srv/salt/ceph/tests/replace/restore_osds.sls @@ -0,0 +1,8 @@ + +creating OSDs: + module.run: + - name: osd.deploy + +save grains: + module.run: + - name: osd.retain diff --git a/tests/unit/_modules/test_osd.py b/tests/unit/_modules/test_osd.py index 971b6507f..50d0cb37f 100644 --- a/tests/unit/_modules/test_osd.py +++ b/tests/unit/_modules/test_osd.py @@ -147,10 +147,10 @@ def test_wait(self, ostd): ret = osdw.wait() assert ret == "" - @pytest.mark.skip(reason='skip') + @patch('time.sleep') @patch('srv.salt._modules.osd.OSDWeight.osd_df') @patch('srv.salt._modules.osd.OSDWeight.osd_safe_to_destroy') - def test_wait_timeout(self, od, ostd): + def test_wait_timeout(self, ostd, od, sleep): """ Check that wait can timeout """ @@ -160,14 +160,13 @@ def test_wait_timeout(self, od, ostd): osdw = osd.OSDWeight(0) osdw.osd_id = 0 osdw.settings = {'timeout': 1, 'delay': 1, 'osd_id': 0} - with pytest.raises(RuntimeError) as excinfo: - ret = osdw.wait() - assert 'Timeout expired' in str(excinfo.value) + ret = osdw.wait() + assert 'Timeout expired' in ret - @pytest.mark.skip(reason='skip') + @patch('time.sleep') @patch('srv.salt._modules.osd.OSDWeight.osd_df') @patch('srv.salt._modules.osd.OSDWeight.osd_safe_to_destroy') - def test_wait_loops(self, od, ostd): + def test_wait_loops(self, ostd, od, sleep): """ Check that wait does loop """ @@ -176,10 +175,9 @@ def test_wait_loops(self, od, ostd): with patch.object(osd.OSDWeight, "__init__", lambda self, _id: None): osdw = osd.OSDWeight(0) osdw.osd_id = 0 - osdw.settings = {'timeout': 1, 'delay': 1, 'osd_id': 0} - with pytest.raises(RuntimeError) 
as excinfo: - ret = osdw.wait() - assert ostd.call_count == 2 + osdw.settings = {'timeout': 2, 'delay': 1, 'osd_id': 0} + ret = osdw.wait() + assert ostd.call_count == 2 class TestOSDConfig(): @@ -590,6 +588,47 @@ def __init__(self, **kwargs): class TestOSDPartitions(): + def test_clean_skips(self): + kwargs = {'format': 'none'} + osd_config = OSDConfig(**kwargs) + + osdp = osd.OSDPartitions(osd_config) + osdp._find_paths = mock.Mock() + osdp.clean() + assert osdp._find_paths.call_count == 0 + + @mock.patch('srv.salt._modules.osd._find_paths') + def test_clean_no_paths(self, mock_fp): + osd_config = OSDConfig() + + osdp = osd.OSDPartitions(osd_config) + mock_fp.return_value = 0 + osdp.clean() + assert mock_fp.call_count == 1 + + @mock.patch('srv.salt._modules.osd._find_paths') + def test_clean(self, mock_fp): + osd_config = OSDConfig() + + osdp = osd.OSDPartitions(osd_config) + mock_fp.return_value = ['/dev/sda1'] + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + osdp.clean() + assert mock_fp.call_count == 1 + assert osd.__salt__['helper.run'].call_count == 1 + + @mock.patch('srv.salt._modules.osd._find_paths') + def test_clean_raises_exception(self, mock_fp): + osd_config = OSDConfig() + + osdp = osd.OSDPartitions(osd_config) + mock_fp.return_value = ['/dev/sda1'] + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + with pytest.raises(RuntimeError) as excinfo: + osdp.clean() + @mock.patch('srv.salt._modules.osd.OSDPartitions._xfs_partitions') def test_partition_filestore(self, xfs_part_mock): kwargs = {'format': 'filestore'} @@ -1866,6 +1905,25 @@ def test_prepare(self, fs_mock, cn_mock, fsid_mock, osdc_o): assert "--fs-type xfs" in ret assert "--dmcrypt" in ret + @mock.patch('srv.salt._modules.osd.OSDCommands._fsid') + @mock.patch('srv.salt._modules.osd.OSDCommands._cluster_name') + @mock.patch('srv.salt._modules.osd.OSDCommands._filestore_args') + def test_prepare_reuse_id(self, fs_mock, cn_mock, fsid_mock, osdc_o): + """ + Given there is a device defined + And it's filestore + And args are populated + Expect --osd-id to be part of cmd + """ + kwargs = {'format': 'filestore'} + fs_mock.return_value = 'filestore_args' + cn_mock.return_value = 'ceph' + fsid_mock.return_value = '0000-0000-0000-0000-0000' + osd_config = OSDConfig(**kwargs) + obj = osdc_o(osd_config) + ret = obj.prepare(osd_id=1) + assert "--osd-id" in ret + @mock.patch('srv.salt._modules.osd.OSDCommands._fsid') @mock.patch('srv.salt._modules.osd.OSDCommands._cluster_name') @mock.patch('srv.salt._modules.osd.OSDCommands._filestore_args') @@ -2034,6 +2092,1039 @@ def test_activate_3(self, osdc_o): def test_detect(self): pass +class TestOSDRemove(): + + @patch('osd.OSDRemove.set_partitions') + def test_keyring_set(self, mock_sp): + mock_device = mock.Mock() + keyring = '/etc/ceph/ceph.client.storage.keyring' + osdr = osd.OSDRemove(1, mock_device, None, None, keyring=keyring) + assert osdr.keyring == keyring + + @patch('osd.OSDRemove.set_partitions') + def test_client_set(self, mock_sp): + # Getting exception complaint from mock class + mock_device1 = mock.Mock() + client = 'client.storage' + osdr = osd.OSDRemove(1, mock_device1, None, None, client=client) + assert osdr.client == client + + def test_set_partitions_from_osd(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + mock_grains = mock.Mock() + osdr = osd.OSDRemove(1, mock_device, None, 
mock_grains) + result = osdr.set_partitions() + assert result == partitions + + def test_set_partitions_from_grains(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = None + mock_grains = mock.Mock() + + osd.__grains__ = {'ceph': {'1': {'partitions': partitions}}} + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + result = osdr.set_partitions() + assert result == partitions + + def test_set_partitions_from_grains_missing_id(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = None + mock_grains = mock.Mock() + + osd.__grains__ = {'ceph': {'2': {'partitions': partitions}}} + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + result = osdr.set_partitions() + assert result == None + + def test_remove_missing_id(self): + partitions = {} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + mock_grains = mock.Mock() + + osd.__grains__ = {'ceph': {'1': {'partitions': partitions}}, + 'id': 'data1.ceph'} + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + result = osdr.remove() + print result + assert "OSD 1 is not present" in result + + def test_remove_when_empty_fails(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "Reweight failed" + result = osdr.remove() + assert result == "Reweight failed" + + @patch('srv.salt._modules.osd.update_destroyed') + def test_remove_when_terminate_fails(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "Failed to terminate OSD" + + result = osdr.remove() + assert result == "Failed to terminate OSD" + + def test_remove_when_mark_destroyed_fails(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = False + + result = osdr.remove() + assert "Failed to mark OSD" in result + + @patch('srv.salt._modules.osd.update_destroyed') + def test_remove_when_update_destroyed_fails(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "Failed to record OSD" + + result = osdr.remove() + assert "Failed to record OSD" in result + + 
@patch('srv.salt._modules.osd.update_destroyed') + def test_remove_when_unmount_fails(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "" + + osdr.unmount = mock.Mock() + osdr.unmount.return_value = "Unmount failed" + + result = osdr.remove() + assert result == "Unmount failed" + + @patch('srv.salt._modules.osd.update_destroyed') + def test_remove_when_wipe_fails(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "" + + osdr.unmount = mock.Mock() + osdr.unmount.return_value = "" + osdr.wipe = mock.Mock() + osdr.wipe.return_value = "Failed to wipe partition" + + result = osdr.remove() + assert result == "Failed to wipe partition" + + @patch('srv.salt._modules.osd.update_destroyed') + def test_remove_when_destroy_fails(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "" + + osdr.unmount = mock.Mock() + osdr.unmount.return_value = "" + osdr.wipe = mock.Mock() + osdr.wipe.return_value = "" + osdr.destroy = mock.Mock() + osdr.destroy.return_value = "Failed to destroy OSD" + + result = osdr.remove() + assert result == "Failed to destroy OSD" + + @patch('srv.salt._modules.osd.update_destroyed') + def test_remove_works(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "" + + osdr.unmount = mock.Mock() + osdr.unmount.return_value = "" + + osdr.wipe = mock.Mock() + osdr.wipe.return_value = "" + osdr.destroy = mock.Mock() + osdr.destroy.return_value = "" + osdr._grains = mock.Mock() + osdr._grains.delete.return_value = "" + + result = osdr.remove() + assert result == "" + + 
@patch('srv.salt._modules.osd.update_destroyed') + def test_remove_force_works(self, mock_ud): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_grains = mock.Mock() + mock_grains.delete.return_value = "" + osdr = osd.OSDRemove(1, mock_device, None, mock_grains, force=True) + osdr.empty = mock.Mock() + osdr.empty.return_value = "" + osdr.terminate = mock.Mock() + osdr.terminate.return_value = "" + osdr.mark_destroyed = mock.Mock() + osdr.mark_destroyed.return_value = True + + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + + mock_ud.return_value = "" + + osdr.unmount = mock.Mock() + osdr.unmount.return_value = "" + + osdr.wipe = mock.Mock() + osdr.wipe.return_value = "" + osdr.destroy = mock.Mock() + osdr.destroy.return_value = "" + osdr._grains = mock.Mock() + osdr._grains.delete.return_value = "" + + result = osdr.remove() + assert result == "" + + def test_empty(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_weight = mock.Mock() + mock_weight.save.return_value = "" + mock_weight.reweight.return_value = (0, "out", "err") + mock_weight.wait.return_value = "" + + osdr = osd.OSDRemove(1, mock_device, mock_weight, None) + result = osdr.empty() + assert result == "" + + def test_empty_fails(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_weight = mock.Mock() + mock_weight.save.return_value = "" + mock_weight.reweight.return_value = (1, "out", "err") + mock_weight.wait.return_value = "Reweight failed" + + osdr = osd.OSDRemove(1, mock_device, mock_weight, None) + result = osdr.empty() + assert result == "Reweight failed" + + @patch('time.sleep') + def test_terminate(self, mock_sleep): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + result = osdr.terminate() + assert result == "" + + @patch('time.sleep') + def test_terminate_fails(self, mock_sleep): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + result = osdr.terminate() + assert "Failed to terminate OSD" in result + + fs = fake_fs.FakeFilesystem() + f_os = fake_fs.FakeOsModule(fs) + f_open = fake_fs.FakeFileOpen(fs) + + @patch('os.rmdir') + @patch('__builtin__.open', new=f_open) + def test_unmount(self, mock_rmdir): + TestOSDRemove.fs.CreateFile('/proc/mounts', + contents='''/dev/sda1 /var/lib/ceph/osd/ceph-1 rest\n''') + + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._mounted = mock.Mock() + osdr._mounted.return_value = ['/dev/sda1'] + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + + result = osdr.unmount() + + TestOSDRemove.fs.RemoveFile('/proc/mounts') + assert result == "" and mock_rmdir.call_count == 1 + + @patch('__builtin__.open', new=f_open) + def test_unmount_fails(self): + TestOSDRemove.fs.CreateFile('/proc/mounts', + contents='''/dev/sda1 
/var/lib/ceph/osd/ceph-1 rest\n''') + + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._mounted = mock.Mock() + osdr._mounted.return_value = ['/dev/sda1'] + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + + result = osdr.unmount() + + TestOSDRemove.fs.RemoveFile('/proc/mounts') + assert "Unmount failed" in result + + @patch('__builtin__.open', new=f_open) + def test_unmount_finds_no_match(self): + TestOSDRemove.fs.CreateFile('/proc/mounts', + contents='''/dev/sdb1 /var/lib/ceph/osd/ceph-1 rest\n''') + + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._mounted = mock.Mock() + osdr._mounted.return_value = [] + + result = osdr.unmount() + + TestOSDRemove.fs.RemoveFile('/proc/mounts') + assert result == "" + + # Need /dev/dm tests once we fix the missing cases + + @patch('srv.salt._modules.osd.readlink') + def test_mounted_osd(self, mock_rl): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + mock_rl.return_value = '/dev/sda1' + result = osdr._mounted() + assert '/dev/sda1' in result + + @patch('srv.salt._modules.osd.readlink') + def test_mounted_lockbox(self, mock_rl): + partitions = {'lockbox': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + mock_rl.return_value = '/dev/sda1' + result = osdr._mounted() + assert '/dev/sda1' in result + + def test_mounted_none(self): + partitions = {} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + result = osdr._mounted() + assert result == [] + + def test_wipe_with_no_partitions(self): + partitions = {} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + result = osdr.wipe() + assert result == "" + + + @patch('os.path.exists', new=f_os.path.exists) + def test_wipe_with_missing_partitions(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + result = osdr.wipe() + assert result == "" + + @patch('os.path.exists', new=f_os.path.exists) + def test_wipe(self): + TestOSDRemove.fs.CreateFile('/dev/sda1') + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + result = osdr.wipe() + TestOSDRemove.fs.RemoveFile('/dev/sda1') + assert result == "" + + @patch('os.path.exists', new=f_os.path.exists) + def test_wipe_fails(self): + TestOSDRemove.fs.CreateFile('/dev/sda1') + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + result = osdr.wipe() + TestOSDRemove.fs.RemoveFile('/dev/sda1') + assert "Failed to wipe partition" in result + + 
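
The unmount tests above drive the code with a fake /proc/mounts and assert that each mounted OSD partition is unmounted, that its now-empty mount point is removed, and that a failing umount surfaces as an error string. A rough standalone sketch of that contract is below; the names are hypothetical and the shell call is abstracted behind a callable, whereas the module itself goes through Salt's __salt__['helper.run'].

import os

def unmount_partitions(mounted, run, proc_mounts='/proc/mounts'):
    # 'mounted' is the list of this OSD's partitions; 'run' executes a shell
    # command and returns (rc, stdout, stderr), like helper.run in the tests.
    with open(proc_mounts) as mounts:
        for line in mounts:
            device, path = line.split()[:2]
            if device in mounted:
                rc, _out, _err = run("umount {}".format(path))
                if rc != 0:
                    return "Unmount failed on {}".format(path)
                os.rmdir(path)
    return ""
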
def test_destroy(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + osdr._delete_partitions = mock.Mock() + osdr._delete_partitions.return_value = "" + osdr._wipe_gpt_backups = mock.Mock() + osdr._wipe_gpt_backups.return_value = "" + + osdr._delete_osd = mock.Mock() + osdr._delete_osd.return_value = "" + + osdr._settle = mock.Mock() + result = osdr.destroy() + assert result == "" + + def test_destroy_fails_partition_delete(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + osdr._delete_partitions = mock.Mock() + osdr._delete_partitions.return_value = "Failed to delete partition" + result = osdr.destroy() + assert result == "Failed to delete partition" + + def test_destroy_fails_osd_delete(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + osdr._osd_disk = mock.Mock() + osdr._osd_disk.return_value = '/dev/sda' + osdr._delete_partitions = mock.Mock() + osdr._delete_partitions.return_value = "" + osdr._wipe_gpt_backups = mock.Mock() + osdr._wipe_gpt_backups.return_value = "" + + osdr._delete_osd = mock.Mock() + osdr._delete_osd.return_value = "Failed to delete OSD" + + result = osdr.destroy() + assert result == "Failed to delete OSD" + + @patch('srv.salt._modules.osd.split_partition') + def test_osd_disk(self, mock_sp): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_sp.return_value = ('/dev/sda', '1') + osdr = osd.OSDRemove(1, mock_device, None, None) + result = osdr._osd_disk() + assert result == "/dev/sda" + + @patch('srv.salt._modules.osd.split_partition') + def test_osd_disk_with_lockbox(self, mock_sp): + partitions = {'lockbox': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_sp.return_value = ('/dev/sda', '1') + osdr = osd.OSDRemove(1, mock_device, None, None) + result = osdr._osd_disk() + assert result == "/dev/sda" + + @patch('srv.salt._modules.osd.readlink') + @patch('os.path.exists', new=f_os.path.exists) + def test_delete_partitions_with_standalone_osd(self, mock_rl): + TestOSDRemove.fs.CreateFile('/dev/sda1') + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_rl.return_value = '/dev/sda1' + osdr = osd.OSDRemove(1, mock_device, None, None) + + osdr.osd_disk = '/dev/sda' + + result = osdr._delete_partitions() + TestOSDRemove.fs.RemoveFile('/dev/sda1') + assert result == "" + + @patch('time.sleep') + @patch('srv.salt._modules.osd.readlink') + @patch('os.path.exists', new=f_os.path.exists) + def test_delete_partitions_with_nvme(self, mock_rl, mock_sleep): + TestOSDRemove.fs.CreateFile('/dev/nvme0n1') + partitions = {'osd': '/dev/nvme0n1p1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_rl.return_value = '/dev/nvme0n1p1' + osdr = osd.OSDRemove(1, mock_device, None, None) + + osdr.osd_disk = '/dev/nvme0n1' + + result = osdr._delete_partitions() + TestOSDRemove.fs.RemoveFile('/dev/nvme0n1') + assert result == "" + + 
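
Taken together with the journal cases that follow, the _delete_partitions tests here seem to encode two rules: partitions sitting on the OSD's own disk are skipped (that disk is destroyed wholesale later), while journal/db/wal partitions on other disks are deleted one by one. A hedged sketch of that logic follows; the actual deletion command is deliberately abstracted away, since the tests only mock helper.run's return code.

def delete_partitions(partitions, osd_disk, split_partition, delete):
    # 'delete(disk, number)' stands in for whatever command the module runs;
    # it returns a shell return code, matching the mocked (rc, out, err) tuples.
    for _role, partition in partitions.items():
        disk, number = split_partition(partition)
        if disk == osd_disk:
            continue  # own disk: wiped later by destroy()
        if delete(disk, number) != 0:
            return "Failed to delete partition {}".format(partition)
    return ""
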
@patch('srv.salt._modules.osd.split_partition') + @patch('srv.salt._modules.osd.readlink') + @patch('os.path.exists', new=f_os.path.exists) + def test_delete_partitions_working(self, mock_rl, mock_sp): + TestOSDRemove.fs.CreateFile('/dev/sdb1') + partitions = {'journal': '/dev/sdb1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_rl.return_value = '/dev/sdb1' + mock_sp.return_value = ('/dev/sdb', '1') + osdr = osd.OSDRemove(1, mock_device, None, None) + + osdr.osd_disk = '/dev/sda' + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + + result = osdr._delete_partitions() + TestOSDRemove.fs.RemoveFile('/dev/sdb1') + assert result == "" + + @patch('srv.salt._modules.osd.split_partition') + @patch('srv.salt._modules.osd.readlink') + @patch('os.path.exists', new=f_os.path.exists) + def test_delete_partitions_fails(self, mock_rl, mock_sp): + TestOSDRemove.fs.CreateFile('/dev/sdb1') + partitions = {'journal': '/dev/sdb1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + mock_rl.return_value = '/dev/sdb1' + mock_sp.return_value = ('/dev/sdb', '1') + osdr = osd.OSDRemove(1, mock_device, None, None) + + osdr.osd_disk = '/dev/sda' + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + + result = osdr._delete_partitions() + TestOSDRemove.fs.RemoveFile('/dev/sdb1') + assert "Failed to delete partition" in result + + def test_mark_destroyed(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + + result = osdr.mark_destroyed() + assert result == True + + def test_mark_destroyed_with_keyring_and_client(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + keyring = '/etc/ceph/ceph.client.storage.keyring' + client = 'client.storage' + osdr = osd.OSDRemove(1, mock_device, None, None, keyring=keyring, client=client) + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, "out", "err") + + result = osdr.mark_destroyed() + assert result == True + + def test_mark_destroyed_fails(self): + partitions = {'osd': '/dev/sda1'} + mock_device = mock.Mock() + mock_device.partitions.return_value = partitions + + osdr = osd.OSDRemove(1, mock_device, None, None) + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (1, "out", "err") + + result = osdr.mark_destroyed() + assert result == False + +class TestOSDDestroyed(): + + fs = fake_fs.FakeFilesystem() + f_os = fake_fs.FakeOsModule(fs) + f_open = fake_fs.FakeFileOpen(fs) + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_update(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.f_os.makedirs("/etc/ceph") + + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = '/dev/disk/by-path/virtio-pci-0000:00:04.0' + + result = osdd.update('/dev/sda', 1) + contents = TestOSDDestroyed.f_open(filename).read() + + TestOSDDestroyed.fs.RemoveFile(filename) + TestOSDDestroyed.f_os.rmdir("/etc/ceph") + TestOSDDestroyed.f_os.rmdir("/etc") + assert result == "" and contents == "/dev/disk/by-path/virtio-pci-0000:00:04.0: 1\n" + + 
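
test_update above also pins the on-disk format of /etc/ceph/destroyedOSDs.yml: a flat YAML mapping from the stable by-path alias (or the raw device name, when no alias can be resolved) to the destroyed OSD id. Purely as an illustration, assuming nothing about the module beyond that asserted format, the same output can be produced with PyYAML:

import yaml

entry = {'/dev/disk/by-path/virtio-pci-0000:00:04.0': 1}
# Matches the file contents asserted in test_update:
print(yaml.dump(entry, default_flow_style=False))
# -> /dev/disk/by-path/virtio-pci-0000:00:04.0: 1
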
@patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_update_with_no_by_path(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.f_os.makedirs("/etc/ceph") + + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = None + + result = osdd.update('/dev/sda', 1) + contents = TestOSDDestroyed.f_open(filename).read() + + TestOSDDestroyed.fs.RemoveFile(filename) + TestOSDDestroyed.f_os.rmdir("/etc/ceph") + TestOSDDestroyed.f_os.rmdir("/etc") + assert "Device /dev/sda is missing" in result and contents == "/dev/sda: 1\n" + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_update_entry_exists(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.fs.CreateFile(filename, contents="""/dev/sda1: '1'""") + + osdd = osd.OSDDestroyed() + result = osdd.update('/dev/sda', 1) + + TestOSDDestroyed.fs.RemoveFile(filename) + assert result == "" + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_update_force(self): + filename = "/etc/ceph/destroyedOSDs.yml" + + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = None + + result = osdd.update('/dev/sda', 1, force=True) + contents = TestOSDDestroyed.f_open(filename).read() + + TestOSDDestroyed.fs.RemoveFile(filename) + assert result == "" and contents == "/dev/sda: 1\n" + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_get(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.fs.CreateFile(filename, contents="""/dev/disk/by-path/virtio-pci-0000:00:04.0: '1'""") + osdd = osd.OSDDestroyed() + + osdd._by_path = mock.Mock() + osdd._by_path.return_value = '/dev/disk/by-path/virtio-pci-0000:00:04.0' + + result = osdd.get('/dev/disk/by-path/virtio-pci-0000:00:04.0') + TestOSDDestroyed.fs.RemoveFile(filename) + assert result == '1' + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_get_no_match(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.fs.CreateFile(filename, contents="""/dev/disk/by-path/virtio-pci-0000:00:04.0: '1'""") + osdd = osd.OSDDestroyed() + + osdd._by_path = mock.Mock() + osdd._by_path.return_value = '/dev/disk/by-path/virtio-pci-0000:00:10.0' + + result = osdd.get('/dev/disk/by-path/virtio-pci-0000:00:10.0') + TestOSDDestroyed.fs.RemoveFile(filename) + assert result is "" + + def test_by_path(self): + osdd = osd.OSDDestroyed() + + output = """ + /dev/disk/by-path/pci-0000:00:1f.2-scsi-1:0:0:0 + /dev/disk/by-path/pci-0000:00:1f.2-ata-2""" + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, output, "err") + + result = osdd._by_path("/dev/sda") + assert result == "/dev/disk/by-path/pci-0000:00:1f.2-scsi-1:0:0:0" + + def test_by_path_no_match(self): + osdd = osd.OSDDestroyed() + + output = "" + + osd.__salt__['helper.run'] = mock.Mock() + osd.__salt__['helper.run'].return_value = (0, output, "err") + + result = osdd._by_path("/dev/sda") + assert result is "" + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_remove(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.fs.CreateFile(filename, contents="""/dev/disk/by-path/virtio-pci-0000:00:04.0: '1'""") + + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = 
"/dev/disk/by-path/virtio-pci-0000:00:04.0" + + osdd.remove('/dev/sda') + + contents = TestOSDDestroyed.f_open(filename).read() + TestOSDDestroyed.fs.RemoveFile(filename) + assert contents == "{}\n" + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_remove_original_device(self): + filename = "/etc/ceph/destroyedOSDs.yml" + TestOSDDestroyed.fs.CreateFile(filename, contents="""/dev/sda: '1'""") + + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = "/dev/disk/by-path/virtio-pci-0000:00:04.0" + + osdd.remove('/dev/sda') + + contents = TestOSDDestroyed.f_open(filename).read() + TestOSDDestroyed.fs.RemoveFile(filename) + assert contents == "{}\n" + + @patch('os.path.exists', new=f_os.path.exists) + def test_remove_missing_file(self): + osdd = osd.OSDDestroyed() + osdd._by_path = mock.Mock() + osdd._by_path.return_value = "/dev/disk/by-path/virtio-pci-0000:00:04.0" + + result = osdd.remove('/dev/sda') + assert result is None + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_dump(self): + filename = "/etc/ceph/destroyedOSDs.yml" + contents = "/dev/sda: '1'" + TestOSDDestroyed.fs.CreateFile(filename, contents=contents) + + osdd = osd.OSDDestroyed() + results = osdd.dump() + TestOSDDestroyed.fs.RemoveFile(filename) + assert results == {'/dev/sda': '1'} + + @patch('os.path.exists', new=f_os.path.exists) + def test_dump_missing_file(self): + osdd = osd.OSDDestroyed() + results = osdd.dump() + assert results is "" + +class TestOSDGrains(): + + fs = fake_fs.FakeFilesystem() + f_os = fake_fs.FakeOsModule(fs) + f_open = fake_fs.FakeFileOpen(fs) + f_glob = fake_glob.FakeGlobModule(fs) + + @patch('glob.glob', new=f_glob.glob) + def test_retain(self): + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + filename = "/var/lib/ceph/osd/ceph-0/type" + TestOSDGrains.fs.CreateFile(filename) + + osdg = osd.OSDGrains(mock_device) + osdg._grains = mock.Mock() + osdg.retain() + TestOSDGrains.fs.RemoveFile(filename) + TestOSDGrains.f_os.rmdir("/var/lib/ceph/osd/ceph-0") + assert osdg._grains.call_count == 1 + osdg._grains.assert_called_with({'0': {'fsid': '66758302-deb5-4078-b871-988c54f0eb57', 'partitions': {'block': '/dev/vdb2', 'osd': '/dev/vdb1'}}}) + + @patch('glob.glob', new=f_glob.glob) + def test_retain_no_osds(self): + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._grains = mock.Mock() + osdg.retain() + assert osdg._grains.call_count == 1 + osdg._grains.assert_called_with({}) + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_delete_no_file(self): + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + osdg.delete(1) + assert osdg._update_grains.call_count == 0 + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_delete_empty_file(self): + filename = "/etc/salt/grains" + TestOSDGrains.fs.CreateFile(filename) + mock_device = mock.Mock() + 
mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + osdg.delete(1) + TestOSDGrains.fs.RemoveFile(filename) + assert osdg._update_grains.call_count == 0 + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_delete(self): + filename = "/etc/salt/grains" + contents = """ + ceph: + '10': + fsid: 751f74ca-dfe2-42af-9457-94b286793abf + partitions: + block: /dev/vdc2 + osd: /dev/vdc1 + '17': + fsid: 28e231cd-cd01-40f9-aa47-e332ccf73e35 + partitions: + block: /dev/vdd2 + osd: /dev/vdd1 + """ + + TestOSDGrains.fs.CreateFile(filename, contents=contents) + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + osdg.delete(10) + TestOSDGrains.fs.RemoveFile(filename) + assert osdg._update_grains.call_count == 1 + expected = {'ceph': + {'17': {'fsid': '28e231cd-cd01-40f9-aa47-e332ccf73e35', + 'partitions': {'block': '/dev/vdd2', + 'osd': '/dev/vdd1'}}}} + osdg._update_grains.assert_called_with(expected) + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_grains_no_file(self): + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + osdg._grains("data") + assert osdg._update_grains.call_count == 1 + expected = {'ceph': 'data'} + osdg._update_grains.assert_called_with(expected) + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_grains(self): + filename = "/etc/salt/grains" + contents = """ + deepsea: + - default + """ + TestOSDGrains.fs.CreateFile(filename, contents=contents) + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + osdg._grains("data") + TestOSDGrains.fs.RemoveFile(filename) + assert osdg._update_grains.call_count == 1 + expected = {'ceph': 'data', 'deepsea': ['default']} + osdg._update_grains.assert_called_with(expected) + + @patch('os.path.exists', new=f_os.path.exists) + @patch('__builtin__.open', new=f_open) + def test_grains_no_update(self): + filename = "/etc/salt/grains" + contents = """ + ceph: + '17': + fsid: 28e231cd-cd01-40f9-aa47-e332ccf73e35 + partitions: + block: /dev/vdd2 + osd: /dev/vdd1 + """ + + TestOSDGrains.fs.CreateFile(filename, contents=contents) + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + osdg._update_grains = mock.Mock() + storage = {'17': {'fsid': '28e231cd-cd01-40f9-aa47-e332ccf73e35', + 'partitions': {'block': '/dev/vdd2', + 'osd': '/dev/vdd1'}}} + osdg._grains(storage) + TestOSDGrains.fs.RemoveFile(filename) + assert osdg._update_grains.call_count == 0 + + @patch('__builtin__.open', new=f_open) + def test_update_grains(self): + filename = 
"/etc/salt/grains" + mock_device = mock.Mock() + mock_device.partitions.return_value = {'block': '/dev/vdb2', + 'osd': '/dev/vdb1'} + mock_device.osd_fsid.return_value = '66758302-deb5-4078-b871-988c54f0eb57' + osdg = osd.OSDGrains(mock_device) + content = {'deepsea': ['default']} + osd.__salt__['saltutil.sync_grains'] = mock.Mock() + + osdg._update_grains(content) + contents = TestOSDGrains.f_open(filename).read() + expected = "deepsea:\n- default\n" + assert contents == expected + class TestOSDDevices(): def test_prefer_underscores(self): @@ -2510,6 +3601,7 @@ def test_is_incorrect_filestore_journal_wrong_size(self, readlink, osdc_o, helpe '''/dev/sdb /var/lib/ceph/osd/ceph-6 rest\n''') ret = obj.is_incorrect() assert ret == True + class TestCephPGS: def test_pg_value(self): @@ -2579,3 +3671,188 @@ def test_quiescent_delay_is_zero(self, pg_states, sleep): with pytest.raises(ValueError) as excinfo: ret = ceph_pgs.quiescent() assert 'The delay cannot be 0' in str(excinfo.value) + +class Test_report(): + + fs = fake_fs.FakeFilesystem() + f_os = fake_fs.FakeOsModule(fs) + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = ([], []) + mock_rop.return_value = ([], []) + + result = osd.report() + assert result == "All configured OSDs are active" + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_not_human(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = ([], []) + mock_rop.return_value = ([], []) + + result = osd.report(human=False) + assert 'unconfigured' in result + assert 'changed' in result + assert 'unmounted' in result + + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_unmounted(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], ['/dev/sda']) + mock_rp.return_value = ([], []) + mock_rop.return_value = ([], []) + + result = osd.report() + assert "No OSD mounted for" in result + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_unconfigured(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = (['/dev/sda'], []) + mock_rop.return_value = ([], []) + + result = osd.report() + assert "No OSD configured for" in result + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_unconfigured_original(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = ([], []) + mock_rop.return_value = (['/dev/sda'], []) + + result = osd.report() + assert "No OSD configured for" in result + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_changed(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = ([], ['/dev/sda']) + mock_rop.return_value = ([], []) + + result = osd.report() + assert "Different configuration for" in 
result + + @patch('srv.salt._modules.osd._report_grains') + @patch('srv.salt._modules.osd._report_pillar') + @patch('srv.salt._modules.osd._report_original_pillar') + def test_report_changed_original(self, mock_rop, mock_rp, mock_rg): + mock_rg.return_value = ([], []) + mock_rp.return_value = ([], []) + mock_rop.return_value = ([], ['/dev/sda']) + + result = osd.report() + assert "Different configuration for" in result + + @patch('srv.salt._modules.osd.split_partition') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_grains_unmounted(self, mock_rl, mock_sp): + osd.__grains__ = {'ceph': {'1': {'partitions': {'osd': '/dev/sda1'}}}} + osd.__pillar__ = {} + + mock_rl.return_value = "/dev/sda1" + mock_sp.return_value = ("/dev/sda", "1") + active, unmounted = osd._report_grains() + assert unmounted == ["/dev/sda"] + assert active == ["/dev/sda"] + + @patch('os.path.exists', new=f_os.path.exists) + @patch('srv.salt._modules.osd.split_partition') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_grains_mounted(self, mock_rl, mock_sp): + osd.__grains__ = {'ceph': {'1': {'partitions': {'osd': '/dev/sda1'}}}} + osd.__pillar__ = {} + + filename = "/var/lib/ceph/osd/ceph-1/fsid" + Test_report.fs.CreateFile(filename) + mock_rl.return_value = "/dev/sda1" + mock_sp.return_value = ("/dev/sda", "1") + active, unmounted = osd._report_grains() + Test_report.fs.RemoveFile(filename) + assert unmounted == [] + assert active == ["/dev/sda"] + + # Add lockbox checks + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_pillar_unconfigured(self, mock_rl, mock_ii): + osd.__pillar__ = {'ceph': {'storage': {'osds': {'/dev/sda': {}}}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = False + unconfigured, changed = osd._report_pillar(["/dev/sdb"]) + assert unconfigured == ["/dev/sda"] + assert changed == [] + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_pillar_configured_and_unchanged(self, mock_rl, mock_ii): + osd.__pillar__ = {'ceph': {'storage': {'osds': {'/dev/sda': {}}}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = False + unconfigured, changed = osd._report_pillar(["/dev/sda"]) + assert unconfigured == [] + assert changed == [] + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_pillar_changed(self, mock_rl, mock_ii): + osd.__pillar__ = {'ceph': {'storage': {'osds': {'/dev/sda': {}}}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = True + unconfigured, changed = osd._report_pillar(["/dev/sda"]) + assert unconfigured == [] + assert changed == ["/dev/sda"] + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_original_pillar_unconfigured(self, mock_rl, mock_ii): + osd.__pillar__ = {'storage': {'osds': ['/dev/sda'], + 'data+journals': {}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = False + unconfigured, changed = osd._report_original_pillar(["/dev/sdb"]) + assert unconfigured == ["/dev/sda"] + assert changed == [] + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_original_pillar_configured_and_unchanged(self, mock_rl, mock_ii): + osd.__pillar__ = {'storage': {'osds': ['/dev/sda'], + 'data+journals': {}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = False + unconfigured, changed = 
osd._report_original_pillar(["/dev/sda"]) + assert unconfigured == [] + assert changed == [] + + @patch('srv.salt._modules.osd.is_incorrect') + @mock.patch('srv.salt._modules.osd.readlink') + def test_report_original_pillar_changed(self, mock_rl, mock_ii): + osd.__pillar__ = {'storage': {'osds': ['/dev/sda'], + 'data+journals': {}}} + + mock_rl.return_value = "/dev/sda" + mock_ii.return_value = True + unconfigured, changed = osd._report_original_pillar(["/dev/sda"]) + assert unconfigured == [] + assert changed == ["/dev/sda"] + diff --git a/tests/unit/runners/test_advise.py b/tests/unit/runners/test_advise.py new file mode 100644 index 000000000..c183a2b7c --- /dev/null +++ b/tests/unit/runners/test_advise.py @@ -0,0 +1,24 @@ +from mock import patch, MagicMock +from srv.modules.runners import advise + + +class TestAdvise(): + """ + A class for checking notifications + """ + + def test_tidy(self): + report = {'data1.ceph': {'unconfigured': {'/dev/sdb', '/dev/sdc'}}} + result = advise._tidy('unconfigured', report) + expected = "data1.ceph: /dev/sdb, /dev/sdc\n" + assert result == expected + + def test_tidy_long(self): + report = {'data1.long.domain.name': + {'unconfigured': + {'/dev/disk/by-id/scsi-012345678901234567890123456789', + '/dev/disk/by-id/scsi-abcdefghijklmnopqrstuvwxyzabcd'}}} + result = advise._tidy('unconfigured', report) + expected = "\ndata1.long.domain.name:\n /dev/disk/by-id/scsi-012345678901234567890123456789\n /dev/disk/by-id/scsi-abcdefghijklmnopqrstuvwxyzabcd\n" + assert result == expected + diff --git a/tests/unit/runners/test_push.py b/tests/unit/runners/test_push.py index e42823816..4acf64baa 100644 --- a/tests/unit/runners/test_push.py +++ b/tests/unit/runners/test_push.py @@ -107,3 +107,16 @@ def test_organize(self, mock_stat): organized = p_d.organize('policy.cfg_trailing_and_leading_whitespace_and_trailing_comment') assert len(organized.keys()) == len(nodes) + + @patch('os.path.isfile', new=f_os.path.isfile) + def test_organize_function_missing_file(self): + result = push.organize() + assert result == "" + + @patch('os.path.isfile', new=f_os.path.isfile) + @patch('__builtin__.open', new=f_open) + def test_organize_function(self): + result = push.organize('policy.cfg') + assert result == {} + + diff --git a/tests/unit/runners/test_replace.py b/tests/unit/runners/test_replace.py new file mode 100644 index 000000000..8431aad96 --- /dev/null +++ b/tests/unit/runners/test_replace.py @@ -0,0 +1,21 @@ +from mock import patch, MagicMock +from srv.modules.runners import replace + + +class TestReplace(): + """ + A class for checking notifications + """ + + def test_find_host(self): + osd_list = {'data1.ceph': ['1', '2', '3'], + 'data2.ceph': ['4', '5', '6']} + result = replace._find_host(5, osd_list) + assert result == 'data2.ceph' + + def test_find_host_missing(self): + osd_list = {'data1.ceph': ['1', '2', '3'], + 'data2.ceph': ['4', '5', '6']} + result = replace._find_host(9, osd_list) + assert result == "" +
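
Stepping back to the osd.report() cases in Test_report: the assertions fix the user-facing messages and suggest how the report is assembled, although the helper signatures below are simplified (in the module, _report_pillar and _report_original_pillar receive the active device list produced by _report_grains). The following is a standalone sketch of the implied contract, not the module's code.

def build_report(grains_report, pillar_report, original_pillar_report, human=True):
    # grains_report is (active, unmounted); the two pillar reports are
    # (unconfigured, changed), mirroring the mocked return values in Test_report.
    _active, unmounted = grains_report
    unconfigured = pillar_report[0] + original_pillar_report[0]
    changed = pillar_report[1] + original_pillar_report[1]

    if not human:
        return {'unconfigured': unconfigured, 'changed': changed, 'unmounted': unmounted}

    messages = (["No OSD mounted for {}".format(d) for d in unmounted] +
                ["No OSD configured for {}".format(d) for d in unconfigured] +
                ["Different configuration for {}".format(d) for d in changed])
    return "\n".join(messages) if messages else "All configured OSDs are active"

# Mirrors test_report_unmounted:
assert "No OSD mounted for" in build_report(([], ['/dev/sda']), ([], []), ([], []))
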