diff --git a/.travis.yml b/.travis.yml
index 24589d0f6d02e..159f8159328de 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -165,7 +165,7 @@ jobs:
     - stage: test
      env: CHECK=mongo PYTHON3=true
     - stage: test
-      env: CHECK=mysql
+      env: CHECK=mysql PYTHON3=true
     - stage: test
      env: CHECK=nagios
     - stage: test
diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py
index ab764aceda756..b7ea8ecd58c0b 100644
--- a/mysql/datadog_checks/mysql/mysql.py
+++ b/mysql/datadog_checks/mysql/mysql.py
@@ -1,26 +1,25 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # (C) Datadog, Inc. Patrick Galbraith 2013
 # All rights reserved
 # Licensed under Simplified BSD License (see LICENSE)
-
-# stdlib
 import re
 import traceback
-from contextlib import closing, contextmanager
 from collections import defaultdict
+from contextlib import closing, contextmanager
 
-# 3p
-from six import iteritems
 import pymysql
+from six import PY3, iteritems, itervalues, text_type
+
 try:
     import psutil
     PSUTIL_AVAILABLE = True
 except ImportError:
     PSUTIL_AVAILABLE = False
 
-# project
-from datadog_checks.checks import AgentCheck
-from datadog_checks.config import _is_affirmative
+from datadog_checks.base import AgentCheck, is_affirmative
+
+if PY3:
+    long = int
 
 GAUGE = "gauge"
 RATE = "rate"
@@ -286,8 +285,9 @@ def __init__(self, name, init_config, agentConfig, instances=None):
         self.mysql_version = {}
         self.qcache_stats = {}
 
-    def get_library_versions(self):
-        return {"pymysql": pymysql.__version__}
+    @classmethod
+    def get_library_versions(cls):
+        return {'pymysql': pymysql.__version__}
 
     def check(self, instance):
         host, port, user, password, mysql_sock, \
@@ -297,17 +297,17 @@ def check(self, instance):
 
         self._set_qcache_stats()
 
-        if (not host or not user) and not defaults_file:
+        if not (host and user) and not defaults_file:
             raise Exception("Mysql host and user are needed.")
 
         with self._connect(host, port, mysql_sock, user, password, defaults_file, ssl, connect_timeout, tags) as db:
             try:
                 # Metadata collection
-                self._collect_metadata(db, host)
+                self._collect_metadata(db)
 
                 # Metric collection
-                self._collect_metrics(host, db, tags, options, queries, max_custom_queries)
+                self._collect_metrics(db, tags, options, queries, max_custom_queries)
                 self._collect_system_metrics(host, db, tags)
 
                 # keeping track of these:
@@ -423,7 +423,7 @@ def _connect(self, host, port, mysql_sock, user, password, defaults_file, ssl, c
             if db:
                 db.close()
 
-    def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries):
+    def _collect_metrics(self, db, tags, options, queries, max_custom_queries):
         # Get aggregate of all VARS we want to collect
         metrics = STATUS_VARS
 
@@ -432,7 +432,7 @@ def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries)
         results = self._get_stats_from_status(db)
         results.update(self._get_stats_from_variables(db))
 
-        if (not _is_affirmative(options.get('disable_innodb_metrics', False)) and self._is_innodb_engine_enabled(db)):
+        if not is_affirmative(options.get('disable_innodb_metrics', False)) and self._is_innodb_engine_enabled(db):
             results.update(self._get_stats_from_innodb_status(db))
 
             innodb_keys = [
@@ -477,7 +477,7 @@ def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries)
             except (KeyError, TypeError) as e:
                 self.log.error("Not all InnoDB buffer pool metrics are available, unable to compute: {0}".format(e))
 
-            if _is_affirmative(options.get('extra_innodb_metrics', False)):
+            if is_affirmative(options.get('extra_innodb_metrics', False)):
                 self.log.debug("Collecting Extra Innodb Metrics")
                 metrics.update(OPTIONAL_INNODB_VARS)
 
@@ -509,41 +509,40 @@ def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries)
         metrics.update(INNODB_VARS)
         metrics.update(BINLOG_VARS)
 
-        if _is_affirmative(options.get('extra_status_metrics', False)):
+        if is_affirmative(options.get('extra_status_metrics', False)):
             self.log.debug("Collecting Extra Status Metrics")
             metrics.update(OPTIONAL_STATUS_VARS)
 
-            if self._version_compatible(db, host, (5, 6, 6)):
+            if self._version_compatible(db, (5, 6, 6)):
                 metrics.update(OPTIONAL_STATUS_VARS_5_6_6)
 
-        if _is_affirmative(options.get('galera_cluster', False)):
+        if is_affirmative(options.get('galera_cluster', False)):
             # already in result-set after 'SHOW STATUS' just add vars to collect
             self.log.debug("Collecting Galera Metrics.")
             metrics.update(GALERA_VARS)
 
         performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema')
-        above_560 = self._version_compatible(db, host, (5, 6, 0))
-        if _is_affirmative(options.get('extra_performance_metrics', False)) and above_560 and \
-                performance_schema_enabled:
+        above_560 = self._version_compatible(db, (5, 6, 0))
+        if is_affirmative(options.get('extra_performance_metrics', False)) and above_560 and performance_schema_enabled:
             # report avg query response time per schema to Datadog
             results['perf_digest_95th_percentile_avg_us'] = self._get_query_exec_time_95th_us(db)
             results['query_run_time_avg'] = self._query_exec_time_per_schema(db)
             metrics.update(PERFORMANCE_VARS)
 
-        if _is_affirmative(options.get('schema_size_metrics', False)):
+        if is_affirmative(options.get('schema_size_metrics', False)):
             # report avg query response time per schema to Datadog
             results['information_schema_size'] = self._query_size_per_schema(db)
             metrics.update(SCHEMA_VARS)
 
-        if _is_affirmative(options.get('replication', False)):
+        if is_affirmative(options.get('replication', False)):
             # Get replica stats
-            is_mariadb = self._get_is_mariadb(db, host)
+            is_mariadb = self._get_is_mariadb(db)
             replication_channel = options.get('replication_channel')
             if replication_channel:
                 self.service_check_tags.append("channel:{0}".format(replication_channel))
                 tags.append("channel:{0}".format(replication_channel))
             results.update(self._get_replica_stats(db, is_mariadb, replication_channel))
-            nonblocking = _is_affirmative(options.get('replication_non_blocking_status', False))
+            nonblocking = is_affirmative(options.get('replication_non_blocking_status', False))
             results.update(self._get_slave_status(db, above_560, nonblocking))
             metrics.update(REPLICA_VARS)
 
@@ -556,13 +555,13 @@ def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries)
             slave_io_running = self._collect_type('Slave_IO_Running', results, dict)
             slave_sql_running = self._collect_type('Slave_SQL_Running', results, dict)
             if slave_io_running:
-                slave_io_running = any(v.lower().strip() == 'yes' for v in slave_io_running.itervalues())
+                slave_io_running = any(v.lower().strip() == 'yes' for v in itervalues(slave_io_running))
             if slave_sql_running:
-                slave_sql_running = any(v.lower().strip() == 'yes' for v in slave_sql_running.itervalues())
+                slave_sql_running = any(v.lower().strip() == 'yes' for v in itervalues(slave_sql_running))
 
             # MySQL 5.7.x might not have 'Slave_running'. See: https://bugs.mysql.com/bug.php?id=78544
             # look at replica vars collected at the top of if-block
-            if self._version_compatible(db, host, (5, 7, 0)):
+            if self._version_compatible(db, (5, 7, 0)):
                 if not (slave_io_running is None and slave_sql_running is None):
                     if slave_io_running and slave_sql_running:
                         slave_running_status = AgentCheck.OK
@@ -590,7 +589,7 @@ def _collect_metrics(self, host, db, tags, options, queries, max_custom_queries)
                 slave_running_status = AgentCheck.CRITICAL
 
         # deprecated in favor of service_check("mysql.replication.slave_running")
-        self.gauge(self.SLAVE_SERVICE_CHECK_NAME, (1 if slave_running_status == AgentCheck.OK else 0), tags=tags)
+        self.gauge(self.SLAVE_SERVICE_CHECK_NAME, 1 if slave_running_status == AgentCheck.OK else 0, tags=tags)
         self.service_check(self.SLAVE_SERVICE_CHECK_NAME, slave_running_status, tags=self.service_check_tags)
 
         # "synthetic" metrics
@@ -634,14 +633,14 @@ def _is_master(self, slaves, results):
 
         return False
 
-    def _collect_metadata(self, db, host):
-        version = self._get_version(db, host)
+    def _collect_metadata(self, db):
+        version = self._get_version(db)
         self.service_metadata('version', ".".join(version))
 
-    def _submit_metrics(self, variables, dbResults, tags):
-        for variable, metric in variables.iteritems():
+    def _submit_metrics(self, variables, db_results, tags):
+        for variable, metric in iteritems(variables):
             metric_name, metric_type = metric
-            for tag, value in self._collect_all_scalars(variable, dbResults):
+            for tag, value in self._collect_all_scalars(variable, db_results):
                 metric_tags = list(tags)
                 if tag:
                     metric_tags.append(tag)
@@ -655,12 +654,12 @@ def _submit_metrics(self, variables, dbResults, tags):
             elif metric_type == MONOTONIC:
                 self.monotonic_count(metric_name, value, tags=metric_tags)
 
-    def _version_compatible(self, db, host, compat_version):
+    def _version_compatible(self, db, compat_version):
         # some patch version numbers contain letters (e.g. 5.0.51a)
         # so let's be careful when we compute the version number
         try:
-            mysql_version = self._get_version(db, host)
+            mysql_version = self._get_version(db)
         except Exception as e:
             self.warning("Cannot compute mysql version, assuming it's older.: %s" % str(e))
@@ -672,7 +671,7 @@ def _version_compatible(self, db, host, compat_version):
 
         return version >= compat_version
 
-    def _get_version(self, db, host):
+    def _get_version(self, db):
         hostkey = self._get_host_key()
         if hostkey in self.mysql_version:
             version = self.mysql_version[hostkey]
@@ -691,7 +690,8 @@ def _get_version(self, db, host):
         self.mysql_version[hostkey] = version
         return version
 
-    def _get_is_mariadb(self, db, host):
+    @classmethod
+    def _get_is_mariadb(cls, db):
         with closing(db.cursor()) as cursor:
             cursor.execute('SELECT VERSION() LIKE "%MariaDB%"')
             result = cursor.fetchone()
@@ -702,24 +702,24 @@ def _collect_all_scalars(self, key, dictionary):
         if key not in dictionary or dictionary[key] is None:
             yield None, None
         elif isinstance(dictionary[key], dict):
-            for tag, _ in dictionary[key].iteritems():
+            for tag, _ in iteritems(dictionary[key]):
                 yield tag, self._collect_type(tag, dictionary[key], float)
         else:
             yield None, self._collect_type(key, dictionary, float)
 
-    def _collect_scalar(self, key, dict):
-        return self._collect_type(key, dict, float)
+    def _collect_scalar(self, key, mapping):
+        return self._collect_type(key, mapping, float)
 
-    def _collect_string(self, key, dict):
-        return self._collect_type(key, dict, unicode)
+    def _collect_string(self, key, mapping):
+        return self._collect_type(key, mapping, text_type)
 
-    def _collect_type(self, key, dict, the_type):
+    def _collect_type(self, key, mapping, the_type):
         self.log.debug("Collecting data with %s" % key)
-        if key not in dict:
+        if key not in mapping:
             self.log.debug("%s returned None" % key)
             return None
-        self.log.debug("Collecting done, value %s" % dict[key])
-        return the_type(dict[key])
+        self.log.debug("Collecting done, value %s" % mapping[key])
+        return the_type(mapping[key])
 
     def _collect_dict(self, metric_type, field_metric_map, query, db, tags):
         """
@@ -735,9 +735,7 @@ def _collect_dict(self, metric_type, field_metric_map, query, db, tags):
             cursor.execute(query)
             result = cursor.fetchone()
             if result is not None:
-                for field in field_metric_map.keys():
-                    # Get the agent metric name from the column name
-                    metric = field_metric_map[field]
+                for field, metric in list(iteritems(field_metric_map)):
                     # Find the column name in the cursor description to identify the column index
                     # http://www.python.org/dev/peps/pep-0249/
                     # cursor.description is a tuple of (column_name, ..., ...)
@@ -748,14 +746,11 @@ def _collect_dict(self, metric_type, field_metric_map, query, db, tags):
                         self.log.debug(
                             "Collecting done, value %s" % result[col_idx])
                         if metric_type == GAUGE:
-                            self.gauge(metric, float(
-                                result[col_idx]), tags=tags)
+                            self.gauge(metric, float(result[col_idx]), tags=tags)
                         elif metric_type == RATE:
-                            self.rate(metric, float(
-                                result[col_idx]), tags=tags)
+                            self.rate(metric, float(result[col_idx]), tags=tags)
                         else:
-                            self.gauge(metric, float(
-                                result[col_idx]), tags=tags)
+                            self.gauge(metric, float(result[col_idx]), tags=tags)
                     else:
                         self.log.debug(
                             "Received value is None for index %d" % col_idx)
@@ -817,9 +812,8 @@ def _get_server_pid(self, db):
         if pid_file is not None:
             self.log.debug("pid file: %s" % str(pid_file))
             try:
-                f = open(pid_file)
-                pid = int(f.readline())
-                f.close()
+                with open(pid_file, 'rb') as f:
+                    pid = int(f.readline())
             except IOError:
                 self.log.debug("Cannot read mysql pid file %s" % pid_file)
 
@@ -836,14 +830,16 @@ def _get_server_pid(self, db):
 
         return pid
 
-    def _get_stats_from_status(self, db):
+    @classmethod
+    def _get_stats_from_status(cls, db):
         with closing(db.cursor()) as cursor:
             cursor.execute("SHOW /*!50002 GLOBAL */ STATUS;")
             results = dict(cursor.fetchall())
 
         return results
 
-    def _get_stats_from_variables(self, db):
+    @classmethod
+    def _get_stats_from_variables(cls, db):
         with closing(db.cursor()) as cursor:
             cursor.execute("SHOW GLOBAL VARIABLES;")
             results = dict(cursor.fetchall())
@@ -857,7 +853,7 @@ def _get_binary_log_stats(self, db):
                 master_logs = dict(cursor.fetchall())
 
                 binary_log_space = 0
-                for key, value in master_logs.iteritems():
+                for key, value in iteritems(master_logs):
                     binary_log_space += value
 
                 return binary_log_space
@@ -876,7 +872,7 @@ def _is_innodb_engine_enabled(self, db):
                     support != 'no' and support != 'disabled'"
                 )
 
-                return (cursor.rowcount > 0)
+                return cursor.rowcount > 0
 
         except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
             self.warning("Possibly innodb stats unavailable - error querying engines table: %s" % str(e))
@@ -939,7 +935,7 @@ def _get_slave_status(self, db, above_560, nonblocking):
                 cursor.execute("SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND LIKE '%Binlog dump%'")
                 slave_results = cursor.fetchall()
                 slaves = 0
-                for row in slave_results:
+                for _ in slave_results:
                     slaves += 1
 
                 return {'Slaves_connected': slaves}
@@ -948,8 +944,9 @@ def _get_slave_status(self, db, above_560, nonblocking):
             self.warning("Privileges error accessing the process tables (must grant PROCESS): %s" % str(e))
             return {}
 
-    def _are_values_numeric(self, array):
-        return all([v.isdigit() for v in array])
+    @classmethod
+    def _are_values_numeric(cls, array):
+        return all(v.isdigit() for v in array)
 
     def _get_stats_from_innodb_status(self, db):
         # There are a number of important InnoDB metrics that are reported in
@@ -1268,14 +1265,14 @@ def _get_stats_from_innodb_status(self, db):
 
         # Finally we change back the metrics values to string to make the values
        # consistent with how they are reported by SHOW GLOBAL STATUS
-        for metric, value in results.iteritems():
+        for metric, value in list(iteritems(results)):
             results[metric] = str(value)
 
         return results
 
     def _get_variable_enabled(self, results, var):
         enabled = self._collect_string(var, results)
-        return (enabled and enabled.lower().strip() == 'on')
+        return enabled and enabled.lower().strip() == 'on'
 
     def _get_query_exec_time_95th_us(self, db):
         # Fetches the 95th percentile query execution time and returns the value
@@ -1295,7 +1292,7 @@ def _get_query_exec_time_95th_us(self, db):
             cursor.execute(sql_95th_percentile)
 
             if cursor.rowcount < 1:
-                self.warning("Failed to fetch records from the perf schema\
+                self.warning("Failed to fetch records from the perf schema \
                              'events_statements_summary_by_digest' table.")
                 return None
@@ -1311,7 +1308,8 @@ def _query_exec_time_per_schema(self, db):
         # Fetches the avg query execution time per schema and returns the
         # value in microseconds
 
-        sql_avg_query_run_time = """SELECT schema_name, ROUND((SUM(sum_timer_wait) / SUM(count_star)) / 1000000) AS avg_us
+        sql_avg_query_run_time = """\
+            SELECT schema_name, ROUND((SUM(sum_timer_wait) / SUM(count_star)) / 1000000) AS avg_us
             FROM performance_schema.events_statements_summary_by_digest
             WHERE schema_name IS NOT NULL
             GROUP BY schema_name"""
@@ -1385,7 +1383,7 @@ def _compute_synthetic_results(self, results):
             if not (int(results['Qcache_hits']) - self._qcache_hits):
                 results['Qcache_instant_utilization'] = 0
             else:
-                top = (float(results['Qcache_hits']) - self._qcache_hits)
+                top = float(results['Qcache_hits']) - self._qcache_hits
                 bottom = ((int(results['Qcache_inserts']) - self._qcache_inserts) +
                           (int(results['Qcache_not_cached']) - self._qcache_not_cached) +
                           (int(results['Qcache_hits']) - self._qcache_hits))
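Note: the mysql.py changes above are mostly mechanical py2/py3 fixes that all lean on the same few `six` helpers. A minimal sketch of the pattern, runnable on both interpreters (the `counters` dict and variable names here are illustrative, not taken from the check):

```python
from six import PY3, iteritems, itervalues, text_type

# Python 3 dropped the `long` type; aliasing it to `int` keeps any
# remaining `long(...)` casts from the py2 era working on both versions.
if PY3:
    long = int  # noqa

big = long(2 ** 40)  # int on py3, long on py2

counters = {'Slave_IO_Running': 'Yes', 'Slave_SQL_Running': 'No'}

# dict.iteritems()/.itervalues() no longer exist on Python 3; the six
# functions dispatch to items()/values() there and to the iterator
# methods on Python 2.
running = any(v.lower().strip() == 'yes' for v in itervalues(counters))
assert running

# Materializing with list() is the safe way to mutate while iterating,
# mirroring the list(iteritems(results)) change above: Python 3's dict
# views raise RuntimeError if the dict changes size mid-iteration.
for key, value in list(iteritems(counters)):
    counters[key] = text_type(value).lower()  # text_type is unicode/str
```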
diff --git a/mysql/setup.py b/mysql/setup.py
index 15649a89136c5..58d90eeff6b2f 100644
--- a/mysql/setup.py
+++ b/mysql/setup.py
@@ -23,7 +23,7 @@ def get_requirements(fpath):
         return f.readlines()
 
 
-CHECKS_BASE_REQ = 'datadog_checks_base'
+CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
 
 setup(
     name='datadog-mysql',
diff --git a/mysql/tests/common.py b/mysql/tests/common.py
index b9944ca1f78ee..c76c33cb18d4c 100644
--- a/mysql/tests/common.py
+++ b/mysql/tests/common.py
@@ -1,10 +1,9 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-
+# Licensed under a 3-clause BSD style license (see LICENSE)
 import os
 
-from datadog_checks.utils.common import get_docker_hostname
+from datadog_checks.dev import get_docker_hostname
 
 HERE = os.path.dirname(os.path.abspath(__file__))
 ROOT = os.path.dirname(os.path.dirname(HERE))
diff --git a/mysql/tests/common_config.py b/mysql/tests/common_config.py
deleted file mode 100644
index 2dbad650e8571..0000000000000
--- a/mysql/tests/common_config.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (C) Datadog, Inc. 2010-2017
-# All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-
-import common
-import tags
-
-MYSQL_MINIMAL_CONFIG = {
-    'server': common.HOST,
-    'user': common.USER,
-    'pass': common.PASS,
-    'port': common.PORT
-}
-
-MYSQL_COMPLEX_CONFIG = {
-    'server': common.HOST,
-    'user': common.USER,
-    'pass': common.PASS,
-    'port': common.PORT,
-    'options': {
-        'replication': True,
-        'extra_status_metrics': True,
-        'extra_innodb_metrics': True,
-        'extra_performance_metrics': True,
-        'schema_size_metrics': True,
-    },
-    'tags': tags.METRIC_TAGS,
-    'queries': [
-        {
-            'query': "SELECT * from testdb.users where name='Alice' limit 1;",
-            'metric': 'alice.age',
-            'type': 'gauge',
-            'field': 'age'
-        },
-        {
-            'query': "SELECT * from testdb.users where name='Bob' limit 1;",
-            'metric': 'bob.age',
-            'type': 'gauge',
-            'field': 'age'
-        }
-    ]
-}
-
-CONNECTION_FAILURE = {
-    'server': common.HOST,
-    'user': 'unknown',
-    'pass': common.PASS,
-}
diff --git a/mysql/tests/conftest.py b/mysql/tests/conftest.py
index fe1f8b9690b74..4b6d3a77af87e 100644
--- a/mysql/tests/conftest.py
+++ b/mysql/tests/conftest.py
@@ -1,87 +1,96 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-import subprocess
-import time
+# Licensed under a 3-clause BSD style license (see LICENSE)
 import os
-import sys
 
-import pytest
 import pymysql
+import pytest
 
-from . import common
+from datadog_checks.dev import WaitFor, docker_run
+
+from . import common, tags
 
 MYSQL_FLAVOR = os.getenv('MYSQL_FLAVOR', 'mysql')
 COMPOSE_FILE = '{}.yaml'.format(MYSQL_FLAVOR)
 
 
-@pytest.fixture(scope="session")
-def spin_up_mysql():
-    """
-    Start a cluster with one master, one replica and one unhealthy replica and
-    stop it after the tests are done.
-    If there's any problem executing docker-compose, let the exception bubble
-    up.
-    """
-    env = os.environ
-    env['MYSQL_DOCKER_REPO'] = _mysql_docker_repo()
-    env['MYSQL_PORT'] = str(common.PORT)
-    env['MYSQL_SLAVE_PORT'] = str(common.SLAVE_PORT)
-    env['WAIT_FOR_IT_SCRIPT_PATH'] = _wait_for_it_script()
-
-    args = [
-        "docker-compose",
-        "-f", os.path.join(common.HERE, 'compose', COMPOSE_FILE)
-    ]
-
-    subprocess.check_call(args + ["up", "-d"], env=env)
-
-    # wait for the master and setup the database
-    started = False
-    for _ in xrange(15):
-        try:
-            passw = common.MARIA_ROOT_PASS if MYSQL_FLAVOR == 'mariadb' else ''
-            conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root', password=passw)
-            _setup_master(conn)
-            sys.stderr.write("Master connected!\n")
-            started = True
-            break
-        except Exception as e:
-            sys.stderr.write("Exception starting master: {}\n".format(e))
-            time.sleep(2)
-
-    if not started:
-        subprocess.check_call(args + ["logs", "mysql-master"], env=env)
-        subprocess.check_call(args + ["down"], env=env)
-        raise Exception("Timeout starting master")
-
-    # wait for the slave
-    started = False
-    for _ in xrange(60):
-        try:
-            pymysql.connect(host=common.HOST, port=common.SLAVE_PORT, user=common.USER, passwd=common.PASS)
-            sys.stderr.write("Slave connected!\n")
-            started = True
-            break
-        except Exception as e:
-            sys.stderr.write("Exception starting slave: {}\n".format(e))
-            time.sleep(2)
-
-    if not started:
-        subprocess.check_call(args + ["logs", "mysql-slave"], env=env)
-        subprocess.check_call(args + ["down"], env=env)
-        raise Exception("Timeout starting slave")
-
-    yield
-    subprocess.check_call(args + ["down"], env=env)
+@pytest.fixture(scope='session')
+def dd_environment(instance_basic):
+    with docker_run(
+        os.path.join(common.HERE, 'compose', COMPOSE_FILE),
+        env_vars={
+            'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
+            'MYSQL_PORT': str(common.PORT),
+            'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
+            'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
+        },
+        conditions=[
+            WaitFor(connect_master, wait=2),
+            WaitFor(connect_slave, wait=2),
+        ],
+    ):
+        yield instance_basic
+
+
+@pytest.fixture(scope='session')
+def instance_basic():
+    return {
+        'server': common.HOST,
+        'user': common.USER,
+        'pass': common.PASS,
+        'port': common.PORT,
+    }
 
 
 @pytest.fixture
-def aggregator():
-    from datadog_checks.stubs import aggregator
-    aggregator.reset()
-    return aggregator
+def instance_complex():
+    return {
+        'server': common.HOST,
+        'user': common.USER,
+        'pass': common.PASS,
+        'port': common.PORT,
+        'options': {
+            'replication': True,
+            'extra_status_metrics': True,
+            'extra_innodb_metrics': True,
+            'extra_performance_metrics': True,
+            'schema_size_metrics': True,
+        },
+        'tags': tags.METRIC_TAGS,
+        'queries': [
+            {
+                'query': "SELECT * from testdb.users where name='Alice' limit 1;",
+                'metric': 'alice.age',
+                'type': 'gauge',
+                'field': 'age'
+            },
+            {
+                'query': "SELECT * from testdb.users where name='Bob' limit 1;",
+                'metric': 'bob.age',
+                'type': 'gauge',
+                'field': 'age'
+            },
+        ],
+    }
+
+
+@pytest.fixture(scope='session')
+def instance_error():
+    return {
+        'server': common.HOST,
+        'user': 'unknown',
+        'pass': common.PASS,
+    }
+
+
+def connect_master():
+    passw = common.MARIA_ROOT_PASS if MYSQL_FLAVOR == 'mariadb' else ''
+    conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root', password=passw)
+    _setup_master(conn)
+
+
+def connect_slave():
+    pymysql.connect(host=common.HOST, port=common.SLAVE_PORT, user=common.USER, passwd=common.PASS)
 
 
 def _setup_master(conn):
@@ -103,8 +112,8 @@ def _wait_for_it_script():
     FIXME: relying on the filesystem layout is a bad idea, the testing helper
     should expose its path through the api instead
     """
-    dir = os.path.join(common.TESTS_HELPER_DIR, 'scripts', 'wait-for-it.sh')
-    return os.path.abspath(dir)
+    script = os.path.join(common.TESTS_HELPER_DIR, 'scripts', 'wait-for-it.sh')
+    return os.path.abspath(script)
 
 
 def _mysql_docker_repo():
diff --git a/mysql/tests/tags.py b/mysql/tests/tags.py
index 737a6c7a8e07d..c9f2fbcb23b09 100644
--- a/mysql/tests/tags.py
+++ b/mysql/tests/tags.py
@@ -1,8 +1,7 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-
-import common
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from . import common
 
 METRIC_TAGS = ['tag1', 'tag2']
 SC_TAGS = ['server:' + common.HOST, 'port:' + str(common.PORT), 'tag1', 'tag2']
diff --git a/mysql/tests/test_mysql.py b/mysql/tests/test_mysql.py
index 753e6607d97b1..0837eb7dac092 100644
--- a/mysql/tests/test_mysql.py
+++ b/mysql/tests/test_mysql.py
@@ -1,25 +1,23 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-from os import environ
-import logging
+# Licensed under a 3-clause BSD style license (see LICENSE)
 import copy
 import subprocess
+from os import environ
 
 import mock
-import pytest
 import psutil
-
-from datadog_checks.mysql import MySql
-from datadog_checks.utils.platform import Platform
-
-from . import common, variables, tags, common_config
+import pytest
 
-log = logging.getLogger('test_mysql')
+from datadog_checks.base.utils.platform import Platform
+from datadog_checks.mysql import MySql
 
+from . import common, tags, variables
 
-def test_minimal_config(aggregator, spin_up_mysql):
+
+@pytest.mark.usefixtures('dd_environment')
+def test_minimal_config(aggregator, instance_basic):
     mysql_check = MySql(common.CHECK_NAME, {}, {})
-    mysql_check.check(common_config.MYSQL_MINIMAL_CONFIG)
+    mysql_check.check(instance_basic)
 
     # Test service check
     aggregator.assert_service_check('mysql.can_connect', status=MySql.OK,
@@ -33,9 +31,10 @@ def test_minimal_config(aggregator, spin_up_mysql):
         aggregator.assert_metric(mname, at_least=0)
 
 
-def test_complex_config(aggregator, spin_up_mysql):
-    mysql_check = MySql(common.CHECK_NAME, {}, {}, instances=[common_config.MYSQL_COMPLEX_CONFIG])
-    mysql_check.check(common_config.MYSQL_COMPLEX_CONFIG)
+@pytest.mark.usefixtures('dd_environment')
+def test_complex_config(aggregator, instance_complex):
+    mysql_check = MySql(common.CHECK_NAME, {}, {}, instances=[instance_complex])
+    mysql_check.check(instance_complex)
 
     # Test service check
     aggregator.assert_service_check('mysql.can_connect', status=MySql.OK,
@@ -108,14 +107,15 @@ def test_complex_config(aggregator, spin_up_mysql):
     aggregator.assert_all_metrics_covered()
 
 
-def test_connection_failure(aggregator, spin_up_mysql):
+@pytest.mark.usefixtures('dd_environment')
+def test_connection_failure(aggregator, instance_error):
     """
     Service check reports connection failure
     """
-    mysql_check = MySql(common.CHECK_NAME, {}, {}, instances=[common_config.CONNECTION_FAILURE])
+    mysql_check = MySql(common.CHECK_NAME, {}, {}, instances=[instance_error])
 
     with pytest.raises(Exception):
-        mysql_check.check(common_config.CONNECTION_FAILURE)
+        mysql_check.check(instance_error)
 
     aggregator.assert_service_check('mysql.can_connect', status=MySql.CRITICAL,
                                     tags=tags.SC_FAILURE_TAGS, count=1)
@@ -123,9 +123,10 @@
     aggregator.assert_all_metrics_covered()
 
 
-def test_complex_config_replica(aggregator, spin_up_mysql):
+@pytest.mark.usefixtures('dd_environment')
+def test_complex_config_replica(aggregator, instance_complex):
     mysql_check = MySql(common.CHECK_NAME, {}, {})
-    config = copy.deepcopy(common_config.MYSQL_COMPLEX_CONFIG)
+    config = copy.deepcopy(instance_complex)
     config['port'] = common.SLAVE_PORT
 
     mysql_check.check(config)
@@ -139,9 +140,6 @@
     aggregator.assert_service_check('mysql.replication.slave_running', status=MySql.OK,
                                     tags=tags.SC_TAGS_REPLICA, at_least=1)
 
-    ver = map(lambda x: int(x), mysql_check.mysql_version[mysql_check._get_host_key()])
-    ver = tuple(ver)
-
     testable_metrics = (variables.STATUS_VARS + variables.VARIABLES_VARS + variables.INNODB_VARS +
                         variables.BINLOG_VARS + variables.SYSTEM_METRICS + variables.SCHEMA_VARS +
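Note: with the old `common_config.py` module replaced by fixtures, a new test only requests what it needs: the docker environment via the `usefixtures` marker, a config dict, and the metric `aggregator` (assumed here to be provided by the `datadog_checks.dev` pytest plugin, since the local stub fixture was deleted from conftest.py). A sketch of the resulting shape; the test name is illustrative:

```python
import pytest

from datadog_checks.mysql import MySql

from . import common


@pytest.mark.usefixtures('dd_environment')
def test_slow_queries_reported(aggregator, instance_basic):
    check = MySql(common.CHECK_NAME, {}, {}, instances=[instance_basic])
    check.check(instance_basic)

    # One of the STATUS_VARS metrics listed in variables.py
    aggregator.assert_metric('mysql.performance.slow_queries', at_least=0)
```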
diff --git a/mysql/tests/variables.py b/mysql/tests/variables.py
index e71862c4b0859..96502148d8b70 100644
--- a/mysql/tests/variables.py
+++ b/mysql/tests/variables.py
@@ -1,7 +1,6 @@
-# (C) Datadog, Inc. 2010-2017
+# (C) Datadog, Inc. 2018
 # All rights reserved
-# Licensed under Simplified BSD License (see LICENSE)
-
+# Licensed under a 3-clause BSD style license (see LICENSE)
 STATUS_VARS = [
     # Command Metrics
     'mysql.performance.slow_queries',
diff --git a/mysql/tox.ini b/mysql/tox.ini
index fa1974b85b641..e47eecc1c10e0 100644
--- a/mysql/tox.ini
+++ b/mysql/tox.ini
@@ -2,12 +2,10 @@
 minversion = 2.0
 basepython = py27
 envlist =
-    unit
-    mysql55
-    mysql56
-    mysql57
-    maria10130
-    flake8
+    {5.5,5.6,5.7}
+    py{27,36}-{maria}
+    unit
+    flake8
 
 [testenv]
 usedevelop = true
@@ -21,32 +19,19 @@ deps =
 commands =
     pip install --require-hashes -r requirements.txt
     pytest -v -m"not unit"
+setenv =
+    MYSQL_FLAVOR=mysql
+    5.5: MYSQL_VERSION=5.5
+    5.6: MYSQL_VERSION=5.6
+    5.7: MYSQL_VERSION=5.7
+    maria: MYSQL_FLAVOR=mariadb
+    maria: MYSQL_VERSION=10.1.30-r1
 
 [testenv:unit]
 commands =
     pip install --require-hashes -r requirements.txt
     pytest -v -m"unit"
 
-[testenv:mysql55]
-setenv =
-    MYSQL_FLAVOR=mysql
-    MYSQL_VERSION=5.5
-
-[testenv:mysql56]
-setenv =
-    MYSQL_FLAVOR=mysql
-    MYSQL_VERSION=5.6
-
-[testenv:mysql57]
-setenv =
-    MYSQL_FLAVOR=mysql
-    MYSQL_VERSION=5.7
-
-[testenv:maria10130]
-setenv =
-    MYSQL_VERSION=10.1.30-r1
-    MYSQL_FLAVOR=mariadb
-
 [testenv:flake8]
 skip_install = true
 deps =
     flake8
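Note: the rewritten tox.ini relies on tox's generative syntax instead of one section per MySQL flavor. `{5.5,5.6,5.7}` expands to three environments that pick their `MYSQL_VERSION` from the factor-conditional lines in the shared `setenv` block, while `py{27,36}-{maria}` expands to `py27-maria` and `py36-maria`, whose `maria` factor flips `MYSQL_FLAVOR` to `mariadb` and pins `MYSQL_VERSION=10.1.30-r1`. For example, `tox -e py36-maria` runs the integration suite on Python 3.6 against MariaDB, which is presumably what the new `PYTHON3=true` Travis stage at the top of this diff exercises; note that with this envlist only the MariaDB envs carry an explicit interpreter factor, so the versioned MySQL envs appear to inherit the default `basepython = py27`.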