Skip to content

Commit

Permalink
openshift_checks: add property to track 'changed'
Browse files Browse the repository at this point in the history
Introduced a 'changed' property so that checks which can make changes
can record whether they actually did. Rather than each check's own logic
having to track this and include it in the result hash, the check simply
sets the property and the action plugin inserts it into the result hash
after the check runs (even if an exception occurred).

Cleared out a lot of crufty "changed: false" hash entries.
  • Loading branch information
sosiouxme committed Jul 29, 2017
1 parent 3be2748 commit 6d1cc99
Show file tree
Hide file tree
Showing 12 changed files with 44 additions and 40 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,15 @@ def run(self, tmp=None, task_vars=None):
msg=str(e),
)

if check.changed:
r["changed"] = True
check_results[check_name] = r

if r.get("failed", False):
result["failed"] = True
result["msg"] = "One or more checks failed"
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"

result["changed"] = any(r.get("changed", False) for r in check_results.values())
return result

def load_known_checks(self, tmp, task_vars):
Expand Down
2 changes: 2 additions & 0 deletions roles/openshift_health_checker/openshift_checks/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ def __init__(self, execute_module=None, task_vars=None, tmp=None):
self._execute_module = execute_module
self.task_vars = task_vars or {}
self.tmp = tmp
# set True when the check makes a change to the host so it can be reported to the user:
self.changed = False

@abstractproperty
def name(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,11 +41,10 @@ def is_active(self):
return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type

def run(self):
msg, failed, changed = self.ensure_dependencies()
msg, failed = self.ensure_dependencies()
if failed:
return {
"failed": True,
"changed": changed,
"msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}

Expand All @@ -54,11 +53,11 @@ def run(self):

# exit early if all images were found locally
if not missing_images:
return {"changed": changed}
return {}

registries = self.known_docker_registries()
if not registries:
return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
return {"failed": True, "msg": "Unable to retrieve any docker registries."}

available_images = self.available_images(missing_images, registries)
unavailable_images = set(missing_images) - set(available_images)
Expand All @@ -70,10 +69,9 @@ def run(self):
"One or more required Docker images are not available:\n {}\n"
"Configured registries: {}"
).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
"changed": changed,
}

return {"changed": changed}
return {}

def required_images(self):
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,21 +43,20 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
]

def run(self):
msg, failed, changed = self.ensure_dependencies()
msg, failed = self.ensure_dependencies()
if failed:
return {
"failed": True,
"changed": changed,
"msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
}

# attempt to get the docker info hash from the API
docker_info = self.execute_module("docker_info", {})
if docker_info.get("failed"):
return {"failed": True, "changed": changed,
return {"failed": True,
"msg": "Failed to query Docker API. Is docker running on this host?"}
if not docker_info.get("info"): # this would be very strange
return {"failed": True, "changed": changed,
return {"failed": True,
"msg": "Docker API query missing info:\n{}".format(json.dumps(docker_info))}
docker_info = docker_info["info"]

Expand All @@ -68,7 +67,7 @@ def run(self):
"Detected unsupported Docker storage driver '{driver}'.\n"
"Supported storage drivers are: {drivers}"
).format(driver=driver, drivers=', '.join(self.storage_drivers))
return {"failed": True, "changed": changed, "msg": msg}
return {"failed": True, "msg": msg}

# driver status info is a list of tuples; convert to dict and validate based on driver
driver_status = {item[0]: item[1] for item in docker_info.get("DriverStatus", [])}
Expand All @@ -81,7 +80,6 @@ def run(self):
if driver in ['overlay', 'overlay2']:
result = self.check_overlay_support(docker_info, driver_status)

result['changed'] = result.get('changed', False) or changed
return result

def check_devicemapper_support(self, driver_status):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def run(self):
reason = etcdkeysize["module_stderr"]

msg = msg.format(host=etcd_host, reason=reason)
return {"failed": True, "changed": False, "msg": msg}
return {"failed": True, "msg": msg}

if etcdkeysize["size_limit_exceeded"]:
limit = self._to_gigabytes(etcd_imagedata_size_limit)
Expand All @@ -65,7 +65,7 @@ def run(self):
"Use the `oadm prune images` command to cleanup unused Docker images.")
return {"failed": True, "msg": msg.format(host=etcd_host, limit=limit)}

return {"changed": False}
return {}

@staticmethod
def _get_etcd_mountpath(ansible_mounts):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def run(self):
)
return {"failed": True, "msg": msg}

return {"changed": False}
return {}

def _etcd_mount_info(self):
ansible_mounts = self.get_var("ansible_mounts")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,17 @@ def run(self):
"curator",
)
if error:
return {"failed": True, "changed": False, "msg": error}
return {"failed": True, "msg": error}
check_error = self.check_curator(curator_pods)

if check_error:
msg = ("The following Curator deployment issue was found:"
"\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
return {"failed": True, "msg": msg}

# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
return {"failed": False, "msg": 'No problems found with Curator deployment.'}

def check_curator(self, pods):
"""Check to see if curator is up and working. Returns: error string"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,17 @@ def run(self):
"es",
)
if error:
return {"failed": True, "changed": False, "msg": error}
return {"failed": True, "msg": error}
check_error = self.check_elasticsearch(es_pods)

if check_error:
msg = ("The following Elasticsearch deployment issue was found:"
"\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
return {"failed": True, "msg": msg}

# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
return {"failed": False, "msg": 'No problems found with Elasticsearch deployment.'}

def _not_running_elasticsearch_pods(self, es_pods):
"""Returns: list of pods that are not running, list of errors about non-running pods"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,17 @@ def run(self):
"fluentd",
)
if error:
return {"failed": True, "changed": False, "msg": error}
return {"failed": True, "msg": error}
check_error = self.check_fluentd(fluentd_pods)

if check_error:
msg = ("The following Fluentd deployment issue was found:"
"\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
return {"failed": True, "msg": msg}

# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
return {"failed": False, "msg": 'No problems found with Fluentd deployment.'}

@staticmethod
def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def run(self):
"kibana",
)
if error:
return {"failed": True, "changed": False, "msg": error}
return {"failed": True, "msg": error}
check_error = self.check_kibana(kibana_pods)

if not check_error:
Expand All @@ -42,10 +42,10 @@ def run(self):
msg = ("The following Kibana deployment issue was found:"
"\n-------\n"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
return {"failed": True, "msg": msg}

# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
return {"failed": False, "msg": 'No problems found with Kibana deployment.'}

def _verify_url_internal(self, url):
"""
Expand Down
8 changes: 4 additions & 4 deletions roles/openshift_health_checker/openshift_checks/mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,10 @@ def ensure_dependencies(self):
"""
Ensure that docker-related packages exist, but not on atomic hosts
(which would not be able to install but should already have them).
Returns: msg, failed, changed
Returns: msg, failed
"""
if self.get_var("openshift", "common", "is_atomic"):
return "", False, False
return "", False

# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
Expand All @@ -49,5 +49,5 @@ def ensure_dependencies(self):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
changed = result.get("changed", False)
return msg, failed, changed
self.changed = result.get("changed", False)
return msg, failed
14 changes: 9 additions & 5 deletions roles/openshift_health_checker/test/action_plugin_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,15 @@
from openshift_checks import OpenShiftCheckException


def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""

_name, _tags = name, tags

class FakeCheck(object):
name = _name
tags = _tags or []
changed = False

def __init__(self, execute_module=None, task_vars=None, tmp=None):
pass
Expand All @@ -22,6 +23,7 @@ def is_active(self):
return is_active

def run(self):
self.changed = changed
if run_exception is not None:
raise run_exception
return run_return
Expand Down Expand Up @@ -135,14 +137,15 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):


def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test', 'changed': True}
check_class = fake_check(run_return=check_return_value)
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])

result = plugin.run(tmp=None, task_vars=task_vars)

assert result['checks']['fake_check'] == check_return_value
assert changed(result['checks']['fake_check'])
assert not failed(result)
assert changed(result)
assert not skipped(result)
Expand All @@ -165,15 +168,16 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
exception_msg = 'fake check has an exception'
run_exception = OpenShiftCheckException(exception_msg)
check_class = fake_check(run_exception=run_exception)
check_class = fake_check(run_exception=run_exception, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])

result = plugin.run(tmp=None, task_vars=task_vars)

assert failed(result['checks']['fake_check'], msg_has=exception_msg)
assert failed(result, msg_has=['failed'])
assert not changed(result)
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)


Expand Down

0 comments on commit 6d1cc99

Please sign in to comment.