diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2fbbcde
--- /dev/null
+++ b/README.md
@@ -0,0 +1,95 @@
+# Custom Monitor Docker component for Home Assistant
+
+[![maintainer](https://img.shields.io/badge/maintainer-ualex73-blue.svg?style=for-the-badge)](https://github.com/ualex73)
+
+## About
+
+This repository contains the Monitor Docker component I developed to monitor my Docker environment from [Home-Assistant](https://www.home-assistant.io). It is inspired by Sander Huisman's [Docker Monitor](https://github.com/Sanderhuisman/docker_monitor), where I switched mainly from threads to asyncio and put my own wishes/functionality in. Feel free to use the component and report bugs if you find them. If you want to contribute, please report a bug or open a pull request and I will reply as soon as possible.
+
+## Monitor Docker
+
+The Monitor Docker component allows you to monitor Docker and container statistics and turn on/off containers. It can connect to the Docker daemon locally or remotely. When Home Assistant is used within a Docker container, the Docker daemon should be mounted as follows `-v /var/run/docker.sock:/var/run/docker.sock`.
+
+## Installation
+
+### Manual
+- Copy directory `custom_components/monitor_docker` to your `/custom_components` directory.
+- Configure with config below.
+- Restart Home-Assistant.
+
+### Configuration
+
+To use the `monitor_docker` in your installation, add the following to your `configuration.yaml` file:
+
+```yaml
+# Example configuration.yaml entry
+monitor_docker:
+  - name: Docker
+    containers:
+      - appdaemon
+      - db-dsmr
+      - db-hass
+      - deconz
+      - dsmr
+      - hass
+      - influxdb
+      - mosquitto
+      - nodered
+      - unifi
+    rename:
+      appdaemon: AppDaemon
+      db-dsmr: "Database DSMR-Reader"
+      db-hass: Database Home Assistant
+      deconz: DeCONZ
+      domotiga: DomotiGa
+      dsmr: "DSMR-Reader"
+      hass: Home Assistant
+      influxdb: InfluxDB
+      mosquitto: Mosquitto
+      nodered: "Node-RED"
+      unifi: UniFi
+    sensorname: "{name}"
+    switchname: "{name}"
+    monitored_conditions:
+      - version
+      - containers_running
+      - containers_total
+      - status
+      - memory
+```
+
+#### Configuration variables
+
+| Parameter | Type | Description |
+| -------------------- | ------------------------ | --------------------------------------------------------------------- |
+| name | string (Required) | Client name of Docker daemon. Defaults to `Docker`. |
+| url | string (Optional) | Host URL of Docker daemon. Defaults to `unix://var/run/docker.sock`. |
+| scan_interval | time_period (Optional) | Update interval. Defaults to 10 seconds. |
+| containers | list (Optional) | Array of containers to monitor. Defaults to all containers. |
+| monitored_conditions | list (Optional) | Array of conditions to be monitored. Defaults to all conditions. |
+| rename | dictionary (Optional) | Dictionary of containers to rename. Defaults to no renaming. |
+| sensorname | string (Optional) | Sensor string to format the name used in Home Assistant. Defaults to `Docker {name} {sensorname}`, where `{name}` is the container name and `{sensorname}` is e.g. Memory, Status, Network speed Up |
+| switchname | string (Optional) | Switch string to format the name used in Home Assistant. Defaults to `Docker {name}`, where `{name}` is the container name. 
| + +| Monitored Conditions | Description | Unit | +| --------------------------------- | ------------------------------- | ----- | +| version | Docker version | - | +| containers_total | Total number of containers | - | +| containers_running | Number of running containers | - | +| containers_cpu_percentage | CPU Usage | % | +| containers_memory | Memory usage | MB | +| containers_memory_percentage | Memory usage | % | +| status | Container status | - | +| uptime | Container start time | - | +| image | Container image | - | +| cpu_percentage | CPU usage | % | +| memory | Memory usage | MB | +| memory_percentage | Memory usage | % | +| network_speed_up | Network speed upstream | kB/s | +| network_speed_down | Network speed downstream | kB/s | +| network_total_up | Network total upstream | MB | +| network_total_down | Network total downstream | MB | + +## Credits + +* [Sanderhuisman](https://github.com/Sanderhuisman/docker_monitor) diff --git a/custom_components/monitor_docker/__init__.py b/custom_components/monitor_docker/__init__.py new file mode 100644 index 0000000..7d44743 --- /dev/null +++ b/custom_components/monitor_docker/__init__.py @@ -0,0 +1,102 @@ +"""Monitor Docker main component.""" + +import asyncio +import logging +import threading +import voluptuous as vol + +from datetime import timedelta + +import homeassistant.helpers.config_validation as cv + +from homeassistant.helpers.discovery import load_platform + +from .helpers import DockerAPI, DockerContainerAPI + +from homeassistant.const import ( + CONF_MONITORED_CONDITIONS, + CONF_NAME, + CONF_SCAN_INTERVAL, + CONF_URL, + EVENT_HOMEASSISTANT_STOP, +) + +from .const import ( + API, + CONF_CONTAINERS, + CONF_RENAME, + CONF_SENSORNAME, + CONF_SWITCHNAME, + CONFIG, + DOMAIN, + DEFAULT_NAME, + DEFAULT_SENSORNAME, + DEFAULT_SWITCHNAME, + MONITORED_CONDITIONS_LIST, +) + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_SCAN_INTERVAL = timedelta(seconds=10) + +DOCKER_SCHEMA = vol.Schema( + { + 
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, + vol.Optional(CONF_URL, default=None): vol.Any(cv.string, None), + vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): cv.time_period, + vol.Optional( + CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS_LIST + ): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS_LIST)]), + vol.Optional(CONF_CONTAINERS, default=[]): cv.ensure_list, + vol.Optional(CONF_RENAME, default={}): dict, + vol.Optional(CONF_SENSORNAME, default=DEFAULT_SENSORNAME): cv.string, + vol.Optional(CONF_SWITCHNAME, default=DEFAULT_SWITCHNAME): cv.string, + } +) + +CONFIG_SCHEMA = vol.Schema( + {DOMAIN: vol.All(cv.ensure_list, [vol.Any(DOCKER_SCHEMA)])}, extra=vol.ALLOW_EXTRA +) + +################################################################# +async def async_setup(hass, config): + """Setup the Monitor Docker platform.""" + + def RunDocker(hass, entry): + """Wrapper function for a separated thread.""" + + # Create out asyncio loop, because we are already inside + # a def (not main) we need to do create/set + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + # Create docker instance, it will have asyncio threads + hass.data[DOMAIN][entry[CONF_NAME]] = {} + hass.data[DOMAIN][entry[CONF_NAME]][CONFIG] = entry + hass.data[DOMAIN][entry[CONF_NAME]][API] = DockerAPI(hass, entry) + + # _LOGGER.error("load_platform switch") + # load_platform(self._hass, "switch", DOMAIN, self._config, self._config) + + # Now run forever in this separated thread + loop.run_forever() + + # Create domain monitor_docker data variable + hass.data[DOMAIN] = {} + + # Now go through all possible entries, we support 1 or more docker hosts (untested) + for entry in config[DOMAIN]: + if entry[CONF_NAME] in hass.data[DOMAIN]: + _LOGGER.error( + "Instance %s is duplicate, please assign an unique name", + entry[CONF_NAME], + ) + return False + + # Each docker hosts runs in its own thread. 
We need to pass hass too, for the load_platform + thread = threading.Thread( + target=RunDocker, kwargs={"hass": hass, "entry": entry} + ) + thread.start() + + return True diff --git a/custom_components/monitor_docker/const.py b/custom_components/monitor_docker/const.py new file mode 100644 index 0000000..3342472 --- /dev/null +++ b/custom_components/monitor_docker/const.py @@ -0,0 +1,90 @@ +"""Define constants for the Monitor Docker component.""" + +DOMAIN = "monitor_docker" +API = "api" +CONFIG = "config" +CONTAINER = "container" + +CONF_CONTAINERS = "containers" +CONF_RENAME = "rename" +CONF_SENSORNAME = "sensorname" +CONF_SWITCHNAME = "switchname" + +DEFAULT_NAME = "Docker" +DEFAULT_SENSORNAME = "Docker {name} {sensorname}" +DEFAULT_SWITCHNAME = "Docker {name}" + +COMPONENTS = ["sensor", "switch"] + +PRECISION = 2 + +DOCKER_INFO_VERSION = "version" +DOCKER_INFO_CONTAINER_RUNNING = "containers_running" +DOCKER_INFO_CONTAINER_TOTAL = "containers_total" +DOCKER_STATS_CPU_PERCENTAGE = "containers_cpu_percentage" +DOCKER_STATS_MEMORY = "containers_memory" +DOCKER_STATS_MEMORY_PERCENTAGE = "containers_memory_percentage" + +CONTAINER_INFO_STATE = "state" +CONTAINER_INFO_STATUS = "status" +CONTAINER_INFO_NETWORKMODE = "networkmode" +CONTAINER_INFO_UPTIME = "uptime" +CONTAINER_INFO_IMAGE = "image" +CONTAINER_STATS_CPU_PERCENTAGE = "cpu_percentage" +CONTAINER_STATS_MEMORY = "memory" +CONTAINER_STATS_MEMORY_PERCENTAGE = "memory_percentage" +CONTAINER_STATS_NETWORK_SPEED_UP = "network_speed_up" +CONTAINER_STATS_NETWORK_SPEED_DOWN = "network_speed_down" +CONTAINER_STATS_NETWORK_TOTAL_UP = "network_total_up" +CONTAINER_STATS_NETWORK_TOTAL_DOWN = "network_total_down" + +DOCKER_MONITOR_LIST = { + DOCKER_INFO_VERSION: ["Version", None, "mdi:information-outline", None], + DOCKER_INFO_CONTAINER_RUNNING: ["Containers Running", None, "mdi:docker", None], + DOCKER_INFO_CONTAINER_TOTAL: ["Containers Total", None, "mdi:docker", None], + DOCKER_STATS_CPU_PERCENTAGE: ["CPU", "%", 
"mdi:chip", None], + DOCKER_STATS_MEMORY: ["Memory", "MB", "mdi:memory", None], + DOCKER_STATS_MEMORY_PERCENTAGE: ["Memory (percent)", "%", "mdi:memory", None], +} + +CONTAINER_MONITOR_LIST = { + CONTAINER_INFO_STATE: ["State", None, "mdi:checkbox-marked-circle-outline", None], + CONTAINER_INFO_STATUS: ["Status", None, "mdi:checkbox-marked-circle-outline", None], + CONTAINER_INFO_UPTIME: ["Up Time", "", "mdi:clock", "timestamp"], + CONTAINER_INFO_IMAGE: ["Image", None, "mdi:information-outline", None], + CONTAINER_STATS_CPU_PERCENTAGE: ["CPU", "%", "mdi:chip", None], + CONTAINER_STATS_MEMORY: ["Memory", "MB", "mdi:memory", None], + CONTAINER_STATS_MEMORY_PERCENTAGE: ["Memory (percent)", "%", "mdi:memory", None], + CONTAINER_STATS_NETWORK_SPEED_UP: ["Network speed Up", "kB/s", "mdi:upload", None], + CONTAINER_STATS_NETWORK_SPEED_DOWN: [ + "Network speed Down", + "kB/s", + "mdi:download", + None, + ], + CONTAINER_STATS_NETWORK_TOTAL_UP: ["Network total Up", "MB", "mdi:upload", None], + CONTAINER_STATS_NETWORK_TOTAL_DOWN: [ + "Network total Down", + "MB", + "mdi:download", + None, + ], +} + +CONTAINER_MONITOR_NETWORK_LIST = [ + CONTAINER_STATS_NETWORK_SPEED_UP, + CONTAINER_STATS_NETWORK_SPEED_DOWN, + CONTAINER_STATS_NETWORK_TOTAL_UP, + CONTAINER_STATS_NETWORK_TOTAL_DOWN, +] + +MONITORED_CONDITIONS_LIST = list(DOCKER_MONITOR_LIST.keys()) + list( + CONTAINER_MONITOR_LIST.keys() +) + +ATTR_MEMORY_LIMIT = "Memory_limit" +ATTR_ONLINE_CPUS = "Online_CPUs" +ATTR_VERSION_ARCH = "Architecture" +ATTR_VERSION_KERNEL = "Kernel" +ATTR_VERSION_OS = "OS" +ATTR_VERSION_OS_TYPE = "OS_Type" diff --git a/custom_components/monitor_docker/helpers.py b/custom_components/monitor_docker/helpers.py new file mode 100644 index 0000000..22b1f51 --- /dev/null +++ b/custom_components/monitor_docker/helpers.py @@ -0,0 +1,690 @@ +"""Monitor Docker API helper.""" + +import aiodocker +import asyncio +import concurrent +import logging +import time +import threading + +from datetime import datetime, 
timezone +from dateutil import parser, relativedelta + +from homeassistant.helpers.discovery import load_platform + +import homeassistant.util.dt as dt_util + +from homeassistant.const import ( + CONF_MONITORED_CONDITIONS, + CONF_NAME, + CONF_SCAN_INTERVAL, + CONF_URL, + EVENT_HOMEASSISTANT_STOP, +) + +from .const import ( + ATTR_MEMORY_LIMIT, + ATTR_ONLINE_CPUS, + ATTR_VERSION_ARCH, + ATTR_VERSION_KERNEL, + ATTR_VERSION_OS, + ATTR_VERSION_OS_TYPE, + COMPONENTS, + CONFIG, + CONTAINER, + CONTAINER_STATS_CPU_PERCENTAGE, + CONTAINER_INFO_IMAGE, + CONTAINER_INFO_NETWORKMODE, + CONTAINER_STATS_MEMORY, + CONTAINER_STATS_MEMORY_PERCENTAGE, + CONTAINER_STATS_NETWORK_SPEED_UP, + CONTAINER_STATS_NETWORK_SPEED_DOWN, + CONTAINER_STATS_NETWORK_TOTAL_UP, + CONTAINER_STATS_NETWORK_TOTAL_DOWN, + CONTAINER_INFO_STATE, + CONTAINER_INFO_STATUS, + CONTAINER_INFO_UPTIME, + DOCKER_INFO_CONTAINER_RUNNING, + DOCKER_INFO_CONTAINER_TOTAL, + DOCKER_INFO_VERSION, + DOCKER_STATS_CPU_PERCENTAGE, + DOCKER_STATS_MEMORY, + DOCKER_STATS_MEMORY_PERCENTAGE, + DOMAIN, + PRECISION, +) + +_LOGGER = logging.getLogger(__name__) + +# Bytes to MBytes +toKB = lambda a: round(a / (1024 ** 1), PRECISION) +toMB = lambda a: round(a / (1024 ** 2), PRECISION) + +################################################################# +class DockerAPI: + """Docker API abstraction allowing multiple Docker instances beeing monitored.""" + + def __init__(self, hass, config): + self._hass = hass + self._config = config + self._containers = {} + self._tasks = {} + self._info = {} + + self._interval = config[CONF_SCAN_INTERVAL].seconds + + self._loop = asyncio.get_event_loop() + + try: + self._api = aiodocker.Docker(url=self._config[CONF_URL]) + except Exception as err: + _LOGGER.error("Can not connect to Docker API (%s)", str(err)) + return + + version = self._loop.run_until_complete(self._api.version()) + _LOGGER.debug("Docker version: %s", version.get("Version", None)) + + # Start task to monitor events of 
create/delete/start/stop + self._tasks["events"] = self._loop.create_task(self._run_docker_events()) + + # Start task to monitor total/running containers + self._tasks["info"] = self._loop.create_task(self._run_docker_info()) + + # Get the list of containers to monitor + containers = self._loop.run_until_complete(self._api.containers.list(all=True)) + + for container in containers or []: + # Determine name from Docker API, it contains an array with a slash + cname = container._container["Names"][0][1:] + + # We will monitor all containers, including excluded ones. + # This is needed to get total CPU/Memory usage. + _LOGGER.debug("%s: Container Monitored", cname) + + # Create our Docker Container API + self._containers[cname] = DockerContainerAPI( + self._api, cname, self._interval + ) + + hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._monitor_stop) + + for component in COMPONENTS: + load_platform( + self._hass, + component, + DOMAIN, + {CONF_NAME: self._config[CONF_NAME]}, + self._config, + ) + + ############################################################# + def _monitor_stop(self, _service_or_event): + """Stop the monitor thread.""" + _LOGGER.info("Stopping Monitor Docker thread (%s)", self._config[CONF_NAME]) + + self._loop.stop() + + ############################################################# + async def _run_docker_events(self): + """Function to retrieve docker events. 
We can add or remove monitored containers.""" + + try: + while True: + subscriber = self._api.events.subscribe() + + event = await subscriber.get() + if event is None: + break + + # Only monitor container events + if event["Type"] == "container": + if event["Action"] == "create": + cname = event["Actor"]["Attributes"]["name"] + await self._container_add(cname) + + for component in COMPONENTS: + load_platform( + self._hass, + component, + DOMAIN, + {CONF_NAME: self._config[CONF_NAME], CONTAINER: cname}, + self._config, + ) + + if event["Action"] == "destroy": + cname = event["Actor"]["Attributes"]["name"] + await self._container_remove(cname) + except Exception as err: + _LOGGER.error(" run_docker_events (%s)", str(err), exc_info=True) + + ############################################################# + async def _container_add(self, cname): + + if cname in self._containers: + _LOGGER.error("%s: Container already monitored", cname) + return + + _LOGGER.debug("%s: Starting Container Monitor", cname) + + # Create our Docker Container API + self._containers[cname] = DockerContainerAPI( + self._api, cname, self._interval, False + ) + + ############################################################# + async def _container_remove(self, cname): + + if cname in self._containers: + _LOGGER.debug("%s: Stopping Container Monitor", cname) + self._containers[cname].cancel_task() + self._containers[cname].remove_entities() + await asyncio.sleep(0.1) + del self._containers[cname] + else: + _LOGGER.error("%s: Container is NOT monitored", cname) + + ############################################################# + async def _run_docker_info(self): + """Function to retrieve information like docker info.""" + + try: + while True: + info = await self._api.system.info() + self._info[DOCKER_INFO_VERSION] = info.get("ServerVersion") + self._info[DOCKER_INFO_CONTAINER_RUNNING] = info.get("ContainersRunning") + self._info[DOCKER_INFO_CONTAINER_TOTAL] = info.get("Containers") + + 
self._info[ATTR_MEMORY_LIMIT] = info.get("MemTotal") + self._info[ATTR_ONLINE_CPUS] = info.get("NCPU") + self._info[ATTR_VERSION_OS] = info.get("OperationSystem") + self._info[ATTR_VERSION_OS_TYPE] = info.get("OStype") + self._info[ATTR_VERSION_ARCH] = info.get("Architecture") + self._info[ATTR_VERSION_KERNEL] = info.get("KernelVersion") + + self._info[DOCKER_STATS_CPU_PERCENTAGE] = 0.0 + self._info[DOCKER_STATS_MEMORY] = 0 + self._info[DOCKER_STATS_MEMORY_PERCENTAGE] = 0.0 + + # Now go through all containers and get the cpu/memory stats + for container in self._containers.values(): + try: + info = container.get_info() + if info.get(CONTAINER_INFO_STATE) == "running": + stats = container.get_stats() + if stats.get(CONTAINER_STATS_CPU_PERCENTAGE) is not None: + self._info[DOCKER_STATS_CPU_PERCENTAGE] += stats.get( + CONTAINER_STATS_CPU_PERCENTAGE + ) + if stats.get(CONTAINER_STATS_MEMORY) is not None: + self._info[DOCKER_STATS_MEMORY] += stats.get( + CONTAINER_STATS_MEMORY + ) + if stats.get(CONTAINER_STATS_MEMORY_PERCENTAGE) is not None: + self._info[DOCKER_STATS_MEMORY_PERCENTAGE] += stats.get( + CONTAINER_STATS_MEMORY_PERCENTAGE + ) + except Exception as err: + _LOGGER.error( + "%s: run_docker_info memory/cpu of X (%s)", + self._config[CONF_NAME], + str(err), + exc_info=True, + ) + + self._info[DOCKER_STATS_CPU_PERCENTAGE] = round( + self._info[DOCKER_STATS_CPU_PERCENTAGE], PRECISION + ) + self._info[DOCKER_STATS_MEMORY] = round( + self._info[DOCKER_STATS_MEMORY], PRECISION + ) + self._info[DOCKER_STATS_MEMORY_PERCENTAGE] = round( + self._info[DOCKER_STATS_MEMORY_PERCENTAGE], PRECISION + ) + + _LOGGER.debug( + "Version: %s, Containers: %s, Running: %s, CPU: %s%%, Memory: %sMB, %s%%", + self._info[DOCKER_INFO_VERSION], + self._info[DOCKER_INFO_CONTAINER_TOTAL], + self._info[DOCKER_INFO_CONTAINER_RUNNING], + self._info[DOCKER_STATS_CPU_PERCENTAGE], + self._info[DOCKER_STATS_MEMORY], + self._info[DOCKER_STATS_MEMORY_PERCENTAGE], + ) + + await 
asyncio.sleep(self._interval) + except Exception as err: + _LOGGER.error( + "%s: run_docker_info (%s)", + self._config[CONF_NAME], + str(err), + exc_info=True, + ) + + ############################################################# + def list_containers(self): + return self._containers.keys() + + ############################################################# + def get_container(self, cname): + if cname in self._containers: + return self._containers[cname] + else: + _LOGGER.error("Trying to get a not existing container %s", cname) + return None + + ############################################################# + def get_info(self): + return self._info + + +################################################################# +class DockerContainerAPI: + """Docker Container API abstraction.""" + + def __init__(self, api, name, interval, atInit=True): + self._api = api + self._name = name + self._interval = interval + self._busy = False + self._atInit = atInit + self._task = None + self._subscribers = [] + self._cpu_old = {} + self._network_old = {} + + self._info = {} + self._stats = {} + + self._loop = asyncio.get_event_loop() + + # During start-up we will wait on container attachment, + # preventing concurrency issues the main HA loop (we are + # othside that one with our threads) + if self._atInit: + try: + self._container = self._loop.run_until_complete( + self._api.containers.get(self._name) + ) + except Exception as err: + _LOGGER.error( + "%s: Container not available anymore (%s)", + self._name, + str(err), + exc_info=True, + ) + return + + self._task = self._loop.create_task(self._run()) + + ############################################################# + async def _run(self): + + # If we noticed a event=create, we need to attach here. + # The run_until_complete doesn't work, because we are already + # in a running loop. 
+ if not self._atInit: + try: + self._container = await self._api.containers.get(self._name) + except Exception as err: + _LOGGER.error( + "%s: Container not available anymore (%s)", + self._name, + str(err), + exc_info=True, + ) + return + + try: + while True: + + # Don't check container if we are doing a start/stop + if not self._busy: + await self._run_container_info() + + # Only run stats if container is running + if self._info[CONTAINER_INFO_STATE] in ("running", "paused"): + await self._run_container_stats() + + self._notify() + else: + _LOGGER.debug("%s: Waiting on stop/start of container", self._name) + + await asyncio.sleep(self._interval) + except concurrent.futures._base.CancelledError as err: + pass + except Exception as err: + _LOGGER.error( + "%s: Container not available anymore (%s)", + self._name, + str(err), + exc_info=True, + ) + + ############################################################# + async def _run_container_info(self): + """Get container information, but we can not get + the uptime of this container, that is only available + while listing all containers :-(.""" + + self._info = {} + + raw = await self._container.show() + + self._info[CONTAINER_INFO_STATE] = raw["State"]["Status"] + self._info[CONTAINER_INFO_IMAGE] = raw["Config"]["Image"] + self._info[CONTAINER_INFO_NETWORKMODE] = ( + True if raw["HostConfig"]["NetworkMode"] == "host" else False + ) + + # We only do a calculation of startedAt, because we use it twice + startedAt = parser.parse(raw["State"]["StartedAt"]) + + # Determine the container status in the format: + # Up 6 days + # Up 6 days (Paused) + # Exited (0) 2 months ago + # Restarting (99) 5 seconds ago + + if self._info[CONTAINER_INFO_STATE] == "running": + self._info[CONTAINER_INFO_STATUS] = "Up {}".format( + self._calcdockerformat(startedAt) + ) + elif self._info[CONTAINER_INFO_STATE] == "exited": + self._info[CONTAINER_INFO_STATUS] = "Exited ({}) {} ago".format( + raw["State"]["ExitCode"], + 
self._calcdockerformat(parser.parse(raw["State"]["FinishedAt"])), + ) + elif self._info[CONTAINER_INFO_STATE] == "created": + self._info[CONTAINER_INFO_STATUS] = "Created {} ago".format( + self._calcdockerformat(parser.parse(raw["Created"])) + ) + elif self._info[CONTAINER_INFO_STATE] == "restarting": + self._info[CONTAINER_INFO_STATUS] = "Restarting" + elif self._info[CONTAINER_INFO_STATE] == "paused": + self._info[CONTAINER_INFO_STATUS] = "Up {} (Paused)".format( + self._calcdockerformat(startedAt) + ) + else: + self._info[CONTAINER_INFO_STATUS] = "None ({})".format( + raw["State"]["Status"] + ) + + if self._info[CONTAINER_INFO_STATE] in ("running", "paused"): + self._info[CONTAINER_INFO_UPTIME] = dt_util.as_local(startedAt).isoformat() + else: + self._info[CONTAINER_INFO_UPTIME] = None + _LOGGER.debug("%s: %s", self._name, self._info[CONTAINER_INFO_STATUS]) + + ############################################################# + async def _run_container_stats(self): + + # Initialize stats information + stats = {} + stats["cpu"] = {} + stats["memory"] = {} + stats["network"] = {} + stats["read"] = {} + + # Get container stats, only interested in [0] + raw = await self._container.stats(stream=False) + raw = raw[0] + + stats["read"] = parser.parse(raw["read"]) + + # Gather CPU information + cpu_stats = {} + try: + cpu_new = {} + cpu_new["total"] = raw["cpu_stats"]["cpu_usage"]["total_usage"] + cpu_new["system"] = raw["cpu_stats"]["system_cpu_usage"] + + # Compatibility wih older Docker API + if "online_cpus" in raw["cpu_stats"]: + cpu_stats["online_cpus"] = raw["cpu_stats"]["online_cpus"] + else: + cpu_stats["online_cpus"] = len( + raw["cpu_stats"]["cpu_usage"]["percpu_usage"] or [] + ) + + # Calculate cpu usage, but first iteration we don't know it + if self._cpu_old: + cpu_delta = float(cpu_new["total"] - self._cpu_old["total"]) + system_delta = float(cpu_new["system"] - self._cpu_old["system"]) + + cpu_stats["total"] = round(0.0, PRECISION) + if cpu_delta > 0.0 and 
system_delta > 0.0: + cpu_stats["total"] = round( + (cpu_delta / system_delta) + * float(cpu_stats["online_cpus"]) + * 100.0, + PRECISION, + ) + + self._cpu_old = cpu_new + + except KeyError as err: + # Something wrong with the raw data + _LOGGER.error( + "%s: Can not determine CPU usage for container (%s)", + self._name, + str(err), + ) + if "cpu_stats" in raw: + _LOGGER.error("Raw 'cpu_stats' %s", raw["cpu_stats"]) + else: + _LOGGER.error("No 'cpu_stats' found in raw packet") + + # Gather memory information + memory_stats = {} + try: + # Memory is in Bytes, convert to MBytes + memory_stats["usage"] = toMB( + raw["memory_stats"]["usage"] - raw["memory_stats"]["stats"]["cache"] + ) + memory_stats["limit"] = toMB(raw["memory_stats"]["limit"]) + memory_stats["max_usage"] = toMB(raw["memory_stats"]["max_usage"]) + memory_stats["usage_percent"] = round( + float(memory_stats["usage"]) / float(memory_stats["limit"]) * 100.0, + PRECISION, + ) + + except (KeyError, TypeError) as err: + _LOGGER.error( + "%s: Can not determine memory usage for container (%s)", + self._name, + str(err), + ) + if "memory_stats" in raw: + _LOGGER.error( + "%s: Raw 'memory_stats' %s", raw["memory_stats"], self._name + ) + else: + _LOGGER.error("%s: No 'memory_stats' found in raw packet", self._name) + + _LOGGER.debug( + "%s: CPU Usage=%s%%. 
Memory Usage=%sMB, %s%%", + self._name, + cpu_stats.get("total", None), + memory_stats.get("usage", None), + memory_stats.get("usage_percent", None), + ) + + # Gather network information, doesn't work in network=host mode + network_stats = {} + if not self._info[CONTAINER_INFO_NETWORKMODE]: + try: + network_new = {} + network_stats["total_tx"] = 0 + network_stats["total_rx"] = 0 + for if_name, data in raw["networks"].items(): + network_stats["total_tx"] += data["tx_bytes"] + network_stats["total_rx"] += data["rx_bytes"] + + network_new = { + "read": stats["read"], + "total_tx": network_stats["total_tx"], + "total_rx": network_stats["total_rx"], + } + + if self._network_old: + tx = network_new["total_tx"] - self._network_old["total_tx"] + rx = network_new["total_rx"] - self._network_old["total_rx"] + tim = ( + network_new["read"] - self._network_old["read"] + ).total_seconds() + + # Calculate speed, also convert to kByte/sec + network_stats["speed_tx"] = toKB(round(float(tx) / tim, PRECISION)) + network_stats["speed_rx"] = toKB(round(float(rx) / tim, PRECISION)) + + self._network_old = network_new + + # Convert total to MB + network_stats["total_tx"] = toMB(network_stats["total_tx"]) + network_stats["total_rx"] = toMB(network_stats["total_rx"]) + + except KeyError as err: + _LOGGER.error( + "%s: Can not determine network usage for container (%s)", + self._name, + str(err), + ) + if "networks" in raw: + _LOGGER.error("%s: Raw 'networks' %s", raw["networks"], self._name) + else: + _LOGGER.error("%s: No 'networks' found in raw packet", self._name) + + # All information collected + stats["cpu"] = cpu_stats + stats["memory"] = memory_stats + stats["network"] = network_stats + + stats[CONTAINER_STATS_CPU_PERCENTAGE] = cpu_stats.get("total") + stats[CONTAINER_STATS_MEMORY] = memory_stats.get("usage") + stats[CONTAINER_STATS_MEMORY_PERCENTAGE] = memory_stats.get("usage_percent") + stats[CONTAINER_STATS_NETWORK_SPEED_UP] = network_stats.get("speed_tx") + 
stats[CONTAINER_STATS_NETWORK_SPEED_DOWN] = network_stats.get("speed_rx") + stats[CONTAINER_STATS_NETWORK_TOTAL_UP] = network_stats.get("total_tx") + stats[CONTAINER_STATS_NETWORK_TOTAL_DOWN] = network_stats.get("total_rx") + + self._stats = stats + + ############################################################# + def cancel_task(self): + if self._task is not None: + _LOGGER.info("%s: Cancelling task for container info/stats", self._name) + self._task.cancel() + else: + _LOGGER.info( + "%s: Task (not running) can not be cancelled for container info/stats", + self._name, + ) + + ############################################################# + def remove_entities(self): + if len(self._subscribers) > 0: + _LOGGER.debug("%s: Removing entities from container", self._name) + + for callback in self._subscribers: + callback(remove=True) + + self._subscriber = [] + + ############################################################# + async def _start(self): + """Separate loop to start container, because HA loop can't be used.""" + + try: + await self._container.start() + except Exception as err: + _LOGGER.error("%s: Can not start containner (%s)", self._name, str(err)) + finally: + self._busy = False + + ############################################################# + async def start(self): + """Called from HA switch.""" + _LOGGER.info("%s: Start container", self._name) + + self._busy = True + self._loop.create_task(self._start()) + + ############################################################# + async def _stop(self): + """Separate loop to stop container, because HA loop can't be used.""" + try: + await self._container.stop(t=10) + except Exception as err: + _LOGGER.error("%s: Can not stop containner (%s)", self._name, str(err)) + finally: + self._busy = False + + ############################################################# + async def stop(self): + """Called from HA switch.""" + _LOGGER.info("%s: Stop container", self._name) + + self._busy = True + 
self._loop.create_task(self._stop()) + + ############################################################# + def get_name(self): + """Return the container name.""" + return self._name + + ############################################################# + def get_info(self): + """Return the container info.""" + return self._info + + ############################################################# + def get_stats(self): + """Return the container stats.""" + return self._stats + + ############################################################# + def register_callback(self, callback, variable): + """Register callback from sensor/switch.""" + if callback not in self._subscribers: + _LOGGER.debug( + "%s: Added callback to container, entity: %s", self._name, variable + ) + self._subscribers.append(callback) + + ############################################################# + def _notify(self): + if len(self._subscribers) > 0: + _LOGGER.debug( + "%s: Send notify (%d) to container", self._name, len(self._subscribers) + ) + + for callback in self._subscribers: + callback() + + ############################################################# + @staticmethod + def _calcdockerformat(dt): + """Calculate datetime to Docker format, because it isn't available in stats.""" + if dt is None: + return "None" + + delta = relativedelta.relativedelta(datetime.now(timezone.utc), dt) + + if delta.years != 0: + return "{} {}".format(delta.years, "year" if delta.years == 1 else "years") + elif delta.months != 0: + return "{} {}".format( + delta.months, "month" if delta.months == 1 else "months" + ) + elif delta.days != 0: + return "{} {}".format(delta.days, "day" if delta.days == 1 else "days") + elif delta.hours != 0: + return "{} {}".format(delta.hours, "hour" if delta.hours == 1 else "hours") + elif delta.minutes != 0: + return "{} {}".format( + delta.minutes, "minute" if delta.minutes == 1 else "minutes" + ) + + return "{} {}".format( + delta.seconds, "second" if delta.seconds == 1 else "seconds" + ) 
diff --git a/custom_components/monitor_docker/manifest.json b/custom_components/monitor_docker/manifest.json new file mode 100644 index 0000000..6e45606 --- /dev/null +++ b/custom_components/monitor_docker/manifest.json @@ -0,0 +1,7 @@ +{ + "domain": "monitor_docker", + "name": "monitor_docker", + "documentation": "https://www.home-assistant.io/integrations/monitor_docker", + "requirements": ["aiodocker==0.18.8", "python-dateutil==2.8.1"], + "codeowners": ["@ualex73"] +} diff --git a/custom_components/monitor_docker/sensor.py b/custom_components/monitor_docker/sensor.py new file mode 100644 index 0000000..ae12bd1 --- /dev/null +++ b/custom_components/monitor_docker/sensor.py @@ -0,0 +1,300 @@ +"""Docker Monitor sensor component.""" + +import asyncio +import logging + +from homeassistant.components.sensor import ENTITY_ID_FORMAT +from homeassistant.const import CONF_NAME, CONF_MONITORED_CONDITIONS +from homeassistant.helpers.entity import Entity +from homeassistant.util import slugify +import homeassistant.util.dt as dt_util + +from .const import ( + DOMAIN, + API, + ATTR_MEMORY_LIMIT, + ATTR_ONLINE_CPUS, + ATTR_VERSION_ARCH, + ATTR_VERSION_KERNEL, + ATTR_VERSION_OS, + ATTR_VERSION_OS_TYPE, + CONFIG, + CONF_CONTAINERS, + CONF_RENAME, + CONF_SENSORNAME, + DOCKER_INFO_VERSION, + DOCKER_INFO_CONTAINER_RUNNING, + DOCKER_INFO_CONTAINER_TOTAL, + CONTAINER, + CONTAINER_INFO_NETWORKMODE, + CONTAINER_INFO_STATE, + CONTAINER_INFO_STATUS, + CONTAINER_MONITOR_LIST, + CONTAINER_MONITOR_NETWORK_LIST, + DOCKER_MONITOR_LIST, +) + +_LOGGER = logging.getLogger(__name__) + + +async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): + """Set up the Monitor Docker Switch.""" + + if discovery_info is None: + return + + name = discovery_info[CONF_NAME] + api = hass.data[DOMAIN][name][API] + config = hass.data[DOMAIN][name][CONFIG] + prefix = config[CONF_NAME] + + sensors = [] + sensors = [ + DockerSensor(api, prefix, variable) + for variable in 
config[CONF_MONITORED_CONDITIONS] + if variable in DOCKER_MONITOR_LIST + ] + + # We support add/re-add of a container + if CONTAINER in discovery_info: + clist = [discovery_info[CONTAINER]] + else: + clist = api.list_containers() + + for cname in clist: + if cname in config[CONF_CONTAINERS]: + # Try to figure out if we should include any network sensors + capi = api.get_container(cname) + info = capi.get_info() + networkmode = info.get(CONTAINER_INFO_NETWORKMODE) + if networkmode is None: + _LOGGER.error("%s: Can not determine networkmode?", cname) + networkmode = False + + _LOGGER.debug("%s: Adding component Sensor(s)", cname) + for variable in config[CONF_MONITORED_CONDITIONS]: + if variable in CONTAINER_MONITOR_LIST and ( + not networkmode + or (networkmode and variable not in CONTAINER_MONITOR_NETWORK_LIST) + ): + sensors += [ + DockerContainerSensor( + capi, + prefix, + cname, + config[CONF_RENAME].get(cname, cname), + variable, + config[CONF_SENSORNAME], + ) + ] + + async_add_entities(sensors, True) + + return True + + +class DockerSensor(Entity): + """Representation of a Docker Sensor.""" + + def __init__(self, api, prefix, variable): + """Initialize the sensor.""" + self._api = api + self._prefix = prefix + + self._var_id = variable + self._var_name = DOCKER_MONITOR_LIST[variable][0] + self._var_unit = DOCKER_MONITOR_LIST[variable][1] + self._var_icon = DOCKER_MONITOR_LIST[variable][2] + self._var_class = DOCKER_MONITOR_LIST[variable][3] + self._entity_id = ENTITY_ID_FORMAT.format( + slugify(self._prefix + "_" + self._var_name) + ) + + self._state = None + self._attributes = {} + + _LOGGER.info("Initializing Docker sensor '%s'", self._var_id) + + @property + def entity_id(self): + """Return the entity id of the sensor.""" + return self._entity_id + + @property + def name(self): + """Return the name of the sensor.""" + return "{} {}".format(self._prefix, self._var_name) + + @property + def icon(self): + """Icon to use in the frontend.""" + return 
self._var_icon + + @property + def state(self): + """Return the state of the sensor.""" + return self._state + + @property + def device_class(self): + """Return the class of this sensor.""" + return self._var_class + + @property + def unit_of_measurement(self): + """Return the unit the value is expressed in.""" + return self._var_unit + + def update(self): + """Get the latest data for the states.""" + info = self._api.get_info() + + if self._var_id == DOCKER_INFO_VERSION: + self._state = info.get(self._var_id) + self._attributes[ATTR_MEMORY_LIMIT] = info.get(ATTR_MEMORY_LIMIT) + self._attributes[ATTR_ONLINE_CPUS] = info.get(ATTR_ONLINE_CPUS) + self._attributes[ATTR_VERSION_ARCH] = info.get(ATTR_VERSION_ARCH) + self._attributes[ATTR_VERSION_OS] = info.get(ATTR_VERSION_OS) + self._attributes[ATTR_VERSION_OS_TYPE] = info.get(ATTR_VERSION_OS_TYPE) + self._attributes[ATTR_VERSION_KERNEL] = info.get(ATTR_VERSION_KERNEL) + else: + self._state = info.get(self._var_id) + + @property + def device_state_attributes(self): + """Return the state attributes.""" + return self._attributes + + async def async_added_to_hass(self): + """Register callbacks.""" + a = 1 + # self._api.register_callback(self.event_callback, self._var_id) + + def event_callback(self, remove=False): + """Callback for update of container information.""" + + _LOGGER.error("event callback called") + + +class DockerContainerSensor(Entity): + """Representation of a Docker Sensor.""" + + def __init__(self, container, prefix, cname, alias, variable, sensor_name_format): + """Initialize the sensor.""" + self._loop = asyncio.get_running_loop() + self._container = container + self._prefix = prefix + self._cname = cname + + self._var_id = variable + self._var_name = CONTAINER_MONITOR_LIST[variable][0] + self._var_unit = CONTAINER_MONITOR_LIST[variable][1] + self._var_icon = CONTAINER_MONITOR_LIST[variable][2] + self._var_class = CONTAINER_MONITOR_LIST[variable][3] + self._state_extra = None + + self._entity_id = 
ENTITY_ID_FORMAT.format( + slugify(self._prefix + "_" + self._cname + "_" + self._var_name) + ) + self._name = sensor_name_format.format(name=alias, sensorname=self._var_name) + + self._state = None + self._state_extra = None + + self._attributes = {} + + _LOGGER.info( + "%s: Initializing sensor with parameter: %s", self._cname, self._var_name + ) + + @property + def entity_id(self): + """Return the entity id of the cover.""" + return self._entity_id + + @property + def name(self): + """Return the name of the sensor, if any.""" + return self._name + + @property + def icon(self): + """Icon to use in the frontend, if any.""" + if self._var_id == CONTAINER_INFO_STATUS: + if self._state_extra == "running": + return "mdi:checkbox-marked-circle-outline" + else: + return "mdi:checkbox-blank-circle-outline" + + return self._var_icon + + @property + def should_poll(self): + return False + + @property + def state(self): + """Return the state of the sensor.""" + return self._state + + @property + def device_class(self): + """Return the class of this sensor.""" + return self._var_class + + @property + def unit_of_measurement(self): + """Return the unit the value is expressed in.""" + return self._var_unit + + @property + def device_state_attributes(self): + """Return the state attributes.""" + return self._attributes + + async def async_added_to_hass(self): + """Register callbacks.""" + self._container.register_callback(self.event_callback, self._var_id) + + # Call event callback for possible information available + self.event_callback() + + def event_callback(self, remove=False): + """Callback for update of container information.""" + + if remove: + _LOGGER.error("%s: Removing sensor entity: %s", self._cname, self._var_id) + self._loop.create_task(self.async_remove()) + return + + state = None + + _LOGGER.debug("%s: Received callback for: %s", self._cname, self._var_name) + + stats = {} + + try: + info = self._container.get_info() + + if info.get(CONTAINER_INFO_STATE) == 
"running": + stats = self._container.get_stats() + + except Exception as err: + _LOGGER.error("%s: Cannot request container info", str(err)) + else: + if self._var_id == CONTAINER_INFO_STATUS: + state = info.get(CONTAINER_INFO_STATUS) + self._state_extra = info.get(CONTAINER_INFO_STATE) + elif info.get(CONTAINER_INFO_STATE) == "running": + if self._var_id in CONTAINER_MONITOR_LIST: + state = stats.get(self._var_id) + + if state != self._state: + self._state = state + + try: + self.schedule_update_ha_state() + except Exception as err: + _LOGGER.error( + "Failed 'schedule_update_ha_state' %s", str(err), exc_info=True + ) diff --git a/custom_components/monitor_docker/switch.py b/custom_components/monitor_docker/switch.py new file mode 100644 index 0000000..7d87f55 --- /dev/null +++ b/custom_components/monitor_docker/switch.py @@ -0,0 +1,141 @@ +"""Monitor Docker switch component.""" + +import asyncio +import logging + +from homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchEntity +from homeassistant.const import CONF_NAME +from homeassistant.util import slugify + +from .const import ( + DOMAIN, + API, + CONFIG, + CONF_CONTAINERS, + CONF_RENAME, + CONF_SWITCHNAME, + CONTAINER, + CONTAINER_INFO_STATE, +) + +_LOGGER = logging.getLogger(__name__) + + +async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): + """Set up the Monitor Docker Switch.""" + + if discovery_info is None: + return + + name = discovery_info[CONF_NAME] + api = hass.data[DOMAIN][name][API] + config = hass.data[DOMAIN][name][CONFIG] + prefix = config[CONF_NAME] + + switches = [] + + # We support add/re-add of a container + if CONTAINER in discovery_info: + clist = [discovery_info[CONTAINER]] + else: + clist = api.list_containers() + + for cname in clist: + if cname in config[CONF_CONTAINERS]: + _LOGGER.debug("%s: Adding component Switch", cname) + + switches.append( + DockerContainerSwitch( + api.get_container(cname), + prefix, + cname, + 
config[CONF_RENAME].get(cname, cname), + config[CONF_SWITCHNAME], + ) + ) + + if not switches: + _LOGGER.info("No containers set-up") + return False + + async_add_entities(switches, True) + + return True + + +class DockerContainerSwitch(SwitchEntity): + def __init__(self, container, prefix, cname, alias, name_format): + self._loop = asyncio.get_running_loop() + self._container = container + self._prefix = prefix + self._cname = cname + self._state = False + self._entity_id = ENTITY_ID_FORMAT.format( + slugify(self._prefix + "_" + self._cname) + ) + self._name = name_format.format(name=alias) + + @property + def entity_id(self): + """Return the entity id of the switch.""" + return self._entity_id + + @property + def name(self): + """Return the name of the sensor.""" + return self._name + + @property + def should_poll(self): + return False + + @property + def icon(self): + return "mdi:docker" + + @property + def device_state_attributes(self): + return {} + + @property + def is_on(self): + return self._state + + async def async_turn_on(self): + await self._container.start() + self._state = True + self.async_schedule_update_ha_state() + + async def async_turn_off(self): + await self._container.stop() + self._state = False + self.async_schedule_update_ha_state() + + async def async_added_to_hass(self): + """Register callbacks.""" + self._container.register_callback(self.event_callback, "switch") + + # Call event callback for possible information available + self.event_callback() + + def event_callback(self, remove=False): + """Callback for update of container information.""" + + if remove: + _LOGGER.error("%s: Removing switch entity", self._cname) + self._loop.create_task(self.async_remove()) + return + + state = None + + try: + info = self._container.get_info() + except Exception as err: + _LOGGER.error("%s: Cannot request container info", str(err)) + else: + if info is not None: + state = info.get(CONTAINER_INFO_STATE) == "running" + + if state is not self._state: + 
self._state = state + self.async_schedule_update_ha_state() diff --git a/hacs.json b/hacs.json new file mode 100644 index 0000000..c9bcb6a --- /dev/null +++ b/hacs.json @@ -0,0 +1,5 @@ +{ + "name": "Monitor Docker", + "render_readme": false, + "domains": ["sensor", "switch"] +} diff --git a/info.md b/info.md new file mode 100644 index 0000000..27b486a --- /dev/null +++ b/info.md @@ -0,0 +1,88 @@ +# Custom Monitor Docker component for Home Assistant + +[![maintainer](https://img.shields.io/badge/maintainer-ualex73-blue.svg?style=for-the-badge)](https://github.com/ualex73) + +## About + +This repository contains the Monitor Docker component I developed for monitor my Docker environment from [Home-Assistant](https://www.home-assistant.io). It is inspired by the Sander Huismans [Docker Monitor](https://github.com/Sanderhuisman/docker_monitor), where I switched mainly from threads to asyncio and put my own wishes/functionality in. Feel free to use the component and report bugs if you find them. If you want to contribute, please report a bug or pull request and I will reply as soon as possible. + +## Monitor Docker + +The Monitor Docker allows you to monitor Docker and container statistics and turn on/off containers. It can connected to the Docker daemon locally or remotely. When Home Assistant is used within a Docker container, the Docker daemon should be mounted as follows `-v /var/run/docker.sock:/var/run/docker.sock`. 
+ +### Configuration + +To use the `monitor_docker` in your installation, add the following to your `configuration.yaml` file: + +```yaml +# Example configuration.yaml entry +monitor_docker: + - name: Docker + containers: + - appdaemon + - db-dsmr + - db-hass + - deconz + - dsmr + - hass + - influxdb + - mosquitto + - nodered + - unifi + rename: + appdaemon: AppDaemon + db-dsmr: "Database DSMR-Reader" + db-hass: Database Home Assistant + deconz: DeCONZ + domotiga: DomotiGa + dsmr: "DSMR-Reader" + hass: Home Assistant + influxdb: InfluxDB + mosquitto: Mosquitto + nodered: "Node-RED" + unifi: UniFi + sensorname: "{name}" + switchname: "{name}" + monitored_conditions: + - version + - containers_active + - containers_total + - status + - memory +``` + +#### Configuration variables + +| Parameter | Type | Description | +| -------------------- | ------------------------ | --------------------------------------------------------------------- | +| name | string (Required) | Client name of Docker daemon. Defaults to `Docker`. | +| url | string (Optional) | Host URL of Docker daemon. Defaults to `unix://var/run/docker.sock`. | +| scan_interval | time_period (Optional) | Update interval. Defaults to 10 seconds. | +| containers | list (Optional) | Array of containers to monitor. Defaults to all containers. | +| monitored_conditions | list (Optional) | Array of conditions to be monitored. Defaults to all conditions. | +| rename | dictionary (Optional) | Dictionary of containers to rename. Default no renaming. | +| sensorname | string (Optional) | Sensor string to format the name used in Home Assistant. Defaults to `Docker {name} {sensorname}`, where `{name}` is the container name and `{sensorname}` is e.g. Memory, Status, Network speed Up | +| switchname | string (optional) | Switch string to format the name used in Home Assistant. Defaults to `Docker {name}`, where `{name}` is the container name. 
| + +| Monitored Conditions | Description | Unit | +| --------------------------------- | ------------------------------- | ----- | +| version | Docker version | - | +| containers_total | Total number of containers | - | +| containers_running | Number of running containers | - | +| containers_cpu_percentage | CPU Usage | % | +| containers_memory | Memory usage | MB | +| containers_memory_percentage | Memory usage | % | +| status | Container status | - | +| uptime | Container start time | - | +| image | Container image | - | +| cpu_percentage | CPU usage | % | +| memory | Memory usage | MB | +| memory_percentage | Memory usage | % | +| network_speed_up | Network speed upstream | kB/s | +| network_speed_down | Network speed downstream | kB/s | +| network_total_up | Network total upstream | MB | +| network_total_down | Network total downstream | MB | + +## Credits + +* [Sanderhuisman](https://github.com/Sanderhuisman/docker_monitor)