From 42ccb88462753f8427287bf789cd454b02add5ad Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Fri, 18 Oct 2019 17:36:46 +0200 Subject: [PATCH 01/27] Remove multi instance from code --- postgres/datadog_checks/postgres/postgres.py | 375 +++++++++---------- postgres/tests/conftest.py | 2 +- postgres/tests/test_custom_metrics.py | 4 +- postgres/tests/test_integration.py | 4 +- postgres/tests/test_relations.py | 6 +- postgres/tests/test_unit.py | 79 ++-- 6 files changed, 217 insertions(+), 253 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 47e5a104daac6..2352396e1fe6a 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -51,10 +51,6 @@ SSL_MODES = {'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'} -class ShouldRestartException(Exception): - pass - - class PostgreSql(AgentCheck): """Collects per-database, and optionally per-relation metrics, custom metrics""" @@ -68,17 +64,10 @@ class PostgreSql(AgentCheck): _known_servers = set() _known_servers_lock = threading.Lock() - def __init__(self, name, init_config, agentConfig, instances=None): - AgentCheck.__init__(self, name, init_config, agentConfig, instances) - self.dbs = {} - self.versions = {} - self.instance_metrics = {} - self.bgw_metrics = {} - self.archiver_metrics = {} - self.db_bgw_metrics = [] - self.db_archiver_metrics = [] - self.replication_metrics = {} - self.activity_metrics = {} + def __init__(self, name, init_config, instances=None): + AgentCheck.__init__(self, name, init_config, instances) + self._clean_state() + self.db = None self.custom_metrics = {} # Deprecate custom_metrics in favor of custom_queries @@ -87,16 +76,46 @@ def __init__(self, name, init_config, agentConfig, instances=None): "DEPRECATION NOTICE: Please use the new custom_queries option " "rather than the now deprecated custom_metrics" ) + host 
= self.instance.get('host', '') + port = self.instance.get('port', '') + if port != '': + port = int(port) + dbname = self.instance.get('dbname', 'postgres') + self.relations = self.instance.get('relations', []) + if self.relations and not dbname: + raise ConfigurationError('"dbname" parameter must be set when using the "relations" parameter.') + + self.key = (host, port, dbname) + self.tags = self._build_tags(self.instance.get('tags', []), host, port, dbname) + + def _build_tags(self, custom_tags, host, port, dbname): + # Clean up tags in case there was a None entry in the instance + # e.g. if the yaml contains tags: but no actual tags + if custom_tags is None: + tags = [] + else: + tags = list(set(custom_tags)) - def _clean_state(self, key): - self.versions.pop(key, None) - self.instance_metrics.pop(key, None) - self.bgw_metrics.pop(key, None) - self.archiver_metrics.pop(key, None) + # preset tags to host + tags.append('server:{}'.format(host)) + if port: + tags.append('port:{}'.format(port)) + else: + tags.append('port:socket') + + # preset tags to the database name + tags.extend(["db:%s" % dbname]) + return tags + + def _clean_state(self): + self.version = None + self.instance_metrics = None + self.bgw_metrics = None + self.archiver_metrics = None self.db_bgw_metrics = [] self.db_archiver_metrics = [] - self.replication_metrics.pop(key, None) - self.activity_metrics.pop(key, None) + self.replication_metrics = None + self.activity_metrics = None @classmethod def _server_known(cls, host, port): @@ -114,15 +133,15 @@ def _set_server_known(cls, host, port): with PostgreSql._known_servers_lock: PostgreSql._known_servers.add((host, port)) - def _get_replication_role(self, key, db): + def _get_replication_role(self, db): cursor = db.cursor() cursor.execute('SELECT pg_is_in_recovery();') role = cursor.fetchone()[0] # value fetched for role is of return "standby" if role else "master" - def _get_version(self, key, db): - if key not in self.versions: + def 
_get_version(self, db): + if self.version is None: cursor = db.cursor() cursor.execute('SHOW SERVER_VERSION;') version = cursor.fetchone()[0] @@ -141,13 +160,13 @@ def _get_version(self, key, db): version_parts[1] = -1 version = [int(part) for part in version_parts] - self.versions[key] = version + self.version = version - self.service_metadata('version', self.versions[key]) - return self.versions[key] + self.service_metadata('version', self.version) + return self.version - def _is_above(self, key, db, version_to_compare): - version = self._get_version(key, db) + def _is_above(self, db, version_to_compare): + version = self._get_version(db) if type(version) == list: # iterate from major down to bugfix for v, vc in zip_longest(version, version_to_compare, fillvalue=0): @@ -158,28 +177,27 @@ def _is_above(self, key, db, version_to_compare): # return True if version is the same return True - return False - def _is_8_3_or_above(self, key, db): - return self._is_above(key, db, [8, 3, 0]) + def _is_8_3_or_above(self, db): + return self._is_above(db, [8, 3, 0]) - def _is_9_1_or_above(self, key, db): - return self._is_above(key, db, [9, 1, 0]) + def _is_9_1_or_above(self, db): + return self._is_above(db, [9, 1, 0]) - def _is_9_2_or_above(self, key, db): - return self._is_above(key, db, [9, 2, 0]) + def _is_9_2_or_above(self, db): + return self._is_above(db, [9, 2, 0]) - def _is_9_4_or_above(self, key, db): - return self._is_above(key, db, [9, 4, 0]) + def _is_9_4_or_above(self, db): + return self._is_above(db, [9, 4, 0]) - def _is_9_6_or_above(self, key, db): - return self._is_above(key, db, [9, 6, 0]) + def _is_9_6_or_above(self, db): + return self._is_above(db, [9, 6, 0]) - def _is_10_or_above(self, key, db): - return self._is_above(key, db, [10, 0, 0]) + def _is_10_or_above(self, db): + return self._is_above(db, [10, 0, 0]) - def _get_instance_metrics(self, key, db, database_size_metrics, collect_default_db): + def _get_instance_metrics(self, database_size_metrics, 
collect_default_db): """ Add NEWER_92_METRICS to the default set of COMMON_METRICS when server version is 9.2 or later. @@ -191,34 +209,34 @@ def _get_instance_metrics(self, key, db, database_size_metrics, collect_default_ monitoring different databases, we want to collect server metrics only once. See https://github.com/DataDog/dd-agent/issues/1211 """ - metrics = self.instance_metrics.get(key) + metrics = self.instance_metrics if metrics is None: - host, port, dbname = key + host, port, dbname = self.key # check whether we already collected server-wide metrics for this # postgres instance if self._server_known(host, port): # explicitly set instance metrics for this key to an empty list # so we don't get here more than once - self.instance_metrics[key] = [] + self.instance_metrics = [] self.log.debug( "Not collecting instance metrics for key: {} as " - "they are already collected by another instance".format(key) + "they are already collected by another instance".format(self.key) ) return None self._set_server_known(host, port) # select the right set of metrics to collect depending on postgres version - if self._is_9_2_or_above(key, db): - self.instance_metrics[key] = dict(COMMON_METRICS, **NEWER_92_METRICS) + if self._is_9_2_or_above(self.db): + self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) else: - self.instance_metrics[key] = dict(COMMON_METRICS) + self.instance_metrics = dict(COMMON_METRICS) # add size metrics if needed if database_size_metrics: - self.instance_metrics[key].update(DATABASE_SIZE_METRICS) + self.instance_metrics.update(DATABASE_SIZE_METRICS) - metrics = self.instance_metrics.get(key) + metrics = self.instance_metrics # this will happen when the current key contains a postgres server that # we already know, let's avoid to collect duplicates @@ -242,36 +260,36 @@ def _get_instance_metrics(self, key, db, database_size_metrics, collect_default_ return res - def _get_bgw_metrics(self, key, db): + def _get_bgw_metrics(self, db): """Use 
either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS depending on the postgres version. Uses a dictionary to save the result for each instance """ # Extended 9.2+ metrics if needed - metrics = self.bgw_metrics.get(key) + metrics = self.bgw_metrics if metrics is None: # Hack to make sure that if we have multiple instances that connect to # the same host, port, we don't collect metrics twice # as it will result in https://github.com/DataDog/dd-agent/issues/1211 - sub_key = key[:2] + sub_key = self.key[:2] if sub_key in self.db_bgw_metrics: - self.bgw_metrics[key] = None + self.bgw_metrics = None self.log.debug( "Not collecting bgw metrics for key: {0} as " - "they are already collected by another instance".format(key) + "they are already collected by another instance".format(self.key) ) return None self.db_bgw_metrics.append(sub_key) - self.bgw_metrics[key] = dict(COMMON_BGW_METRICS) - if self._is_9_1_or_above(key, db): - self.bgw_metrics[key].update(NEWER_91_BGW_METRICS) - if self._is_9_2_or_above(key, db): - self.bgw_metrics[key].update(NEWER_92_BGW_METRICS) + self.bgw_metrics = dict(COMMON_BGW_METRICS) + if self._is_9_1_or_above(db): + self.bgw_metrics.update(NEWER_91_BGW_METRICS) + if self._is_9_2_or_above(db): + self.bgw_metrics.update(NEWER_92_BGW_METRICS) - metrics = self.bgw_metrics.get(key) + metrics = self.bgw_metrics if not metrics: return None @@ -290,30 +308,30 @@ def _get_count_metrics(self, table_count_limit): ) return metrics - def _get_archiver_metrics(self, key, db): + def _get_archiver_metrics(self, db): """Use COMMON_ARCHIVER_METRICS to read from pg_stat_archiver as defined in 9.4 (first version to have this table). 
Uses a dictionary to save the result for each instance """ # While there's only one set for now, prepare for future additions to # the table, mirroring _get_bgw_metrics() - metrics = self.archiver_metrics.get(key) + metrics = self.archiver_metrics - if self._is_9_4_or_above(key, db) and metrics is None: + if self._is_9_4_or_above(db) and metrics is None: # Collect from only one instance. See _get_bgw_metrics() for details on why. - sub_key = key[:2] + sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: - self.archiver_metrics[key] = None + self.archiver_metrics = None self.log.debug( "Not collecting archiver metrics for key: {0} as " - "they are already collected by another instance".format(key) + "they are already collected by another instance".format(self.key) ) return None self.db_archiver_metrics.append(sub_key) - self.archiver_metrics[key] = dict(COMMON_ARCHIVER_METRICS) - metrics = self.archiver_metrics.get(key) + self.archiver_metrics = dict(COMMON_ARCHIVER_METRICS) + metrics = self.archiver_metrics if not metrics: return None @@ -325,37 +343,37 @@ def _get_archiver_metrics(self, key, db): 'relation': False, } - def _get_replication_metrics(self, key, db): + def _get_replication_metrics(self, db): """ Use either REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2, depending on the postgres version. 
Uses a dictionnary to save the result for each instance """ - metrics = self.replication_metrics.get(key) - if self._is_10_or_above(key, db) and metrics is None: - self.replication_metrics[key] = dict(REPLICATION_METRICS_10) - metrics = self.replication_metrics.get(key) - elif self._is_9_1_or_above(key, db) and metrics is None: - self.replication_metrics[key] = dict(REPLICATION_METRICS_9_1) - if self._is_9_2_or_above(key, db): - self.replication_metrics[key].update(REPLICATION_METRICS_9_2) - metrics = self.replication_metrics.get(key) + metrics = self.replication_metrics + if self._is_10_or_above(db) and metrics is None: + self.replication_metrics = dict(REPLICATION_METRICS_10) + metrics = self.replication_metrics + elif self._is_9_1_or_above(db) and metrics is None: + self.replication_metrics = dict(REPLICATION_METRICS_9_1) + if self._is_9_2_or_above(db): + self.replication_metrics.update(REPLICATION_METRICS_9_2) + metrics = self.replication_metrics return metrics - def _get_activity_metrics(self, key, db, user): + def _get_activity_metrics(self, db, user): """ Use ACTIVITY_METRICS_LT_8_3 or ACTIVITY_METRICS_8_3 or ACTIVITY_METRICS_9_2 depending on the postgres version in conjunction with ACTIVITY_QUERY_10 or ACTIVITY_QUERY_LT_10. 
Uses a dictionnary to save the result for each instance """ - metrics_data = self.activity_metrics.get(key) + metrics_data = self.activity_metrics if metrics_data is None: - query = ACTIVITY_QUERY_10 if self._is_10_or_above(key, db) else ACTIVITY_QUERY_LT_10 - if self._is_9_6_or_above(key, db): + query = ACTIVITY_QUERY_10 if self._is_10_or_above( db) else ACTIVITY_QUERY_LT_10 + if self._is_9_6_or_above(db): metrics_query = ACTIVITY_METRICS_9_6 - elif self._is_9_2_or_above(key, db): + elif self._is_9_2_or_above(db): metrics_query = ACTIVITY_METRICS_9_2 - elif self._is_8_3_or_above(key, db): + elif self._is_8_3_or_above(db): metrics_query = ACTIVITY_METRICS_8_3 else: metrics_query = ACTIVITY_METRICS_LT_8_3 @@ -365,7 +383,7 @@ def _get_activity_metrics(self, key, db, user): metrics_query[i] = q.format(dd__user=user) metrics = {k: v for k, v in zip(metrics_query, ACTIVITY_DD_METRICS)} - self.activity_metrics[key] = (metrics, query) + self.activity_metrics = (metrics, query) else: metrics, query = metrics_data @@ -403,11 +421,11 @@ def _build_relations_config(self, yamlconfig): self.log.warning('Unhandled relations config type: {}'.format(element)) return config - def _query_scope(self, cursor, scope, key, db, instance_tags, is_custom_metrics, relations_config): + def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, relations_config): if scope is None: return None - if scope == REPLICATION_METRICS or not self._is_above(key, db, [9, 0, 0]): + if scope == REPLICATION_METRICS or not self._is_above(db, [9, 0, 0]): log_func = self.log.debug else: log_func = self.log.warning @@ -435,9 +453,9 @@ def _query_scope(self, cursor, scope, key, db, instance_tags, is_custom_metrics, log_func(e) log_func( "It seems the PG version has been incorrectly identified as %s. " - "A reattempt to identify the right version will happen on next agent run." % self.versions[key] + "A reattempt to identify the right version will happen on next agent run." 
% self.version ) - self._clean_state(key) + self._clean_state() db.rollback() except (psycopg2.ProgrammingError, psycopg2.errors.QueryCanceled) as e: log_func("Not all metrics may be available: %s" % str(e)) @@ -519,7 +537,6 @@ def _query_scope(self, cursor, scope, key, db, instance_tags, is_custom_metrics, def _collect_stats( self, - key, db, user, instance_tags, @@ -538,9 +555,9 @@ def _collect_stats( If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml """ - db_instance_metrics = self._get_instance_metrics(key, db, collect_database_size_metrics, collect_default_db) - bgw_instance_metrics = self._get_bgw_metrics(key, db) - archiver_instance_metrics = self._get_archiver_metrics(key, db) + db_instance_metrics = self._get_instance_metrics(collect_database_size_metrics, collect_default_db) + bgw_instance_metrics = self._get_bgw_metrics(db) + archiver_instance_metrics = self._get_archiver_metrics(db) metric_scope = [CONNECTION_METRICS, LOCK_METRICS] @@ -555,49 +572,44 @@ def _collect_stats( metric_scope += [REL_METRICS, IDX_METRICS, SIZE_METRICS, STATIO_METRICS] relations_config = self._build_relations_config(relations) - replication_metrics = self._get_replication_metrics(key, db) + replication_metrics = self._get_replication_metrics(db) if replication_metrics is not None: # FIXME: constants shouldn't be modified REPLICATION_METRICS['metrics'] = replication_metrics metric_scope.append(REPLICATION_METRICS) - try: - cursor = db.cursor() - results_len = self._query_scope( - cursor, db_instance_metrics, key, db, instance_tags, False, relations_config + cursor = db.cursor() + results_len = self._query_scope( + cursor, db_instance_metrics, db, instance_tags, False, relations_config + ) + if results_len is not None: + self.gauge( + "postgresql.db.count", results_len, tags=[t for t in instance_tags if not t.startswith("db:")] ) - if results_len is not None: - self.gauge( - "postgresql.db.count", results_len, tags=[t for t in instance_tags if 
not t.startswith("db:")] - ) - - self._query_scope(cursor, bgw_instance_metrics, key, db, instance_tags, False, relations_config) - self._query_scope(cursor, archiver_instance_metrics, key, db, instance_tags, False, relations_config) - if collect_activity_metrics: - activity_metrics = self._get_activity_metrics(key, db, user) - self._query_scope(cursor, activity_metrics, key, db, instance_tags, False, relations_config) + self._query_scope(cursor, bgw_instance_metrics, db, instance_tags, False, relations_config) + self._query_scope(cursor, archiver_instance_metrics, db, instance_tags, False, relations_config) - for scope in list(metric_scope) + custom_metrics: - self._query_scope(cursor, scope, key, db, instance_tags, scope in custom_metrics, relations_config) + if collect_activity_metrics: + activity_metrics = self._get_activity_metrics(db, user) + self._query_scope(cursor, activity_metrics, db, instance_tags, False, relations_config) - cursor.close() + for scope in list(metric_scope) + custom_metrics: + self._query_scope(cursor, scope, db, instance_tags, scope in custom_metrics, relations_config) - except (psycopg2.InterfaceError, socket.error) as e: - self.log.error("Connection error: %s" % str(e)) - raise ShouldRestartException + cursor.close() @classmethod - def _get_service_check_tags(cls, host, port, tags): + def _get_service_check_tags(cls, host, tags): service_check_tags = ["host:%s" % host] service_check_tags.extend(tags) service_check_tags = list(set(service_check_tags)) return service_check_tags - def get_connection(self, key, host, port, user, password, dbname, ssl, tags, use_cached=True): + def get_connection(self, host, port, user, password, dbname, ssl, tags, use_cached=True): """Get and memoize connections to instances""" - if key in self.dbs and use_cached: - conn = self.dbs[key] + if self.db and use_cached: + conn = self.db if conn.status != psycopg2.extensions.STATUS_READY: # Some transaction went wrong and the connection is in an unhealthy 
state. Let's fix that conn.rollback() @@ -628,11 +640,11 @@ def get_connection(self, key, host, port, user, password, dbname, ssl, tags, use sslmode=ssl, application_name="datadog-agent", ) - self.dbs[key] = connection + self.db = connection return connection except Exception as e: message = u'Error establishing postgres connection: %s' % (str(e)) - service_check_tags = self._get_service_check_tags(host, port, tags) + service_check_tags = self._get_service_check_tags(host, tags) self.service_check( self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message ) @@ -737,10 +749,10 @@ def _get_custom_queries(self, db, tags, custom_queries): metric, value, method = info getattr(self, method)(metric, value, tags=query_tags) - def _get_custom_metrics(self, custom_metrics, key): + def _get_custom_metrics(self, custom_metrics): # Pre-processed cached custom_metrics - if key in self.custom_metrics: - return self.custom_metrics[key] + if self.custom_metrics is not None: + return self.custom_metrics # Otherwise pre-process custom metrics and verify definition required_parameters = ("descriptors", "metrics", "query", "relation") @@ -774,94 +786,49 @@ def _get_custom_metrics(self, custom_metrics, key): except Exception as e: raise Exception('Error processing custom metric `{}`: {}'.format(m, e)) - self.custom_metrics[key] = custom_metrics + self.custom_metrics = custom_metrics return custom_metrics def check(self, instance): - host = instance.get('host', '') - port = instance.get('port', '') - if port != '': - port = int(port) - user = instance.get('username', '') - password = instance.get('password', '') - tags = instance.get('tags', []) - dbname = instance.get('dbname', None) - relations = instance.get('relations', []) - ssl = instance.get('ssl', False) + ssl = self.instance.get('ssl', False) if ssl not in SSL_MODES: ssl = 'require' if is_affirmative(ssl) else 'disable' - table_count_limit = instance.get('table_count_limit', TABLE_COUNT_LIMIT) - 
collect_function_metrics = is_affirmative(instance.get('collect_function_metrics', False)) - # Default value for `count_metrics` is True for backward compatibility - collect_count_metrics = is_affirmative(instance.get('collect_count_metrics', True)) - collect_activity_metrics = is_affirmative(instance.get('collect_activity_metrics', False)) - collect_database_size_metrics = is_affirmative(instance.get('collect_database_size_metrics', True)) - collect_default_db = is_affirmative(instance.get('collect_default_database', False)) - tag_replication_role = is_affirmative(instance.get('tag_replication_role', False)) - if relations and not dbname: - self.warning('"dbname" parameter must be set when using the "relations" parameter.') + user = self.instance.get('username', '') + password = self.instance.get('password', '') - if dbname is None: - dbname = 'postgres' - - key = (host, port, dbname) + table_count_limit = self.instance.get('table_count_limit', TABLE_COUNT_LIMIT) + collect_function_metrics = is_affirmative(self.instance.get('collect_function_metrics', False)) + # Default value for `count_metrics` is True for backward compatibility + collect_count_metrics = is_affirmative(self.instance.get('collect_count_metrics', True)) + collect_activity_metrics = is_affirmative(self.instance.get('collect_activity_metrics', False)) + collect_database_size_metrics = is_affirmative(self.instance.get('collect_database_size_metrics', True)) + collect_default_db = is_affirmative(self.instance.get('collect_default_database', False)) - custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key) + custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', [])) custom_queries = instance.get('custom_queries', []) - # Clean up tags in case there was a None entry in the instance - # e.g. 
if the yaml contains tags: but no actual tags - if tags is None: - tags = [] - else: - tags = list(set(tags)) - - # preset tags to host - tags.append('server:{}'.format(host)) - if port: - tags.append('port:{}'.format(port)) - else: - tags.append('port:socket') - - # preset tags to the database name - tags.extend(["db:%s" % dbname]) + (host, port, dbname) = self.key self.log.debug("Custom metrics: %s" % custom_metrics) + tag_replication_role = is_affirmative(self.instance.get('tag_replication_role', False)) + tags = self.tags + # Collect metrics + db = None try: # Check version - db = self.get_connection(key, host, port, user, password, dbname, ssl, tags) - version = self._get_version(key, db) - self.log.debug("Running check against version %s" % version) + db = self.get_connection(host, port, user, password, dbname, ssl, tags) if tag_replication_role: - tags.extend(["replication_role:{}".format(self._get_replication_role(key, db))]) - self._collect_stats( - key, - db, - user, - tags, - relations, - custom_metrics, - table_count_limit, - collect_function_metrics, - collect_count_metrics, - collect_activity_metrics, - collect_database_size_metrics, - collect_default_db, - ) - self._get_custom_queries(db, tags, custom_queries) - except ShouldRestartException: - self.log.info("Resetting the connection") - self._clean_state(key) - db = self.get_connection(key, host, port, user, password, dbname, ssl, tags, use_cached=False) + tags.extend(["replication_role:{}".format(self._get_replication_role(db))]) + version = self._get_version(db) + self.log.debug("Running check against version %s" % version) self._collect_stats( - key, db, user, tags, - relations, + self.relations, custom_metrics, table_count_limit, collect_function_metrics, @@ -871,12 +838,16 @@ def check(self, instance): collect_default_db, ) self._get_custom_queries(db, tags, custom_queries) + except (psycopg2.InterfaceError, socket.error) as e: + self.log.info("Connection error, will retry on next agent run") + 
self._clean_state() - service_check_tags = self._get_service_check_tags(host, port, tags) - message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname) - self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags, message=message) - try: - # commit to close the current query transaction - db.commit() - except Exception as e: - self.log.warning("Unable to commit: {0}".format(e)) + if db is not None: + service_check_tags = self._get_service_check_tags(host, tags) + message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname) + self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags, message=message) + try: + # commit to close the current query transaction + db.commit() + except Exception as e: + self.log.warning("Unable to commit: {0}".format(e)) diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index 7864cbb4ce075..14ae863dd2f54 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -30,7 +30,7 @@ def dd_environment(e2e_instance): @pytest.fixture def check(): - c = PostgreSql('postgres', {}, {}) + c = PostgreSql('postgres', {}, [{'dbname': 'dbname', 'host':'localhost', 'port': '5432'}]) c._is_9_2_or_above = mock.MagicMock() PostgreSql._known_servers = set() # reset the global state return c diff --git a/postgres/tests/test_custom_metrics.py b/postgres/tests/test_custom_metrics.py index 3cec7d4f37cad..06c2401b76f80 100644 --- a/postgres/tests/test_custom_metrics.py +++ b/postgres/tests/test_custom_metrics.py @@ -24,7 +24,7 @@ def test_custom_metrics(aggregator, pg_instance): ], } ) - postgres_check = PostgreSql('postgres', {}, {}) + postgres_check = PostgreSql('postgres', {}, []) postgres_check.check(pg_instance) tags = ['customdb:a', 'server:{}'.format(pg_instance['host']), 'port:{}'.format(pg_instance['port'])] @@ -54,7 +54,7 @@ def test_custom_queries(aggregator, pg_instance): ] } ) - postgres_check = PostgreSql('postgres', {}, {}) 
+ postgres_check = PostgreSql('postgres', {}, []) postgres_check.check(pg_instance) tags = [ 'db:{}'.format(pg_instance['dbname']), diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 08f6091c9d81d..040c3915d9d21 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -4,9 +4,9 @@ import mock import psycopg2 import pytest +import socket from datadog_checks.postgres import PostgreSql -from datadog_checks.postgres.postgres import ShouldRestartException from .common import DB_NAME, HOST, PORT, POSTGRES_VERSION, check_bgw_metrics, check_common_metrics from .utils import requires_over_10 @@ -141,7 +141,7 @@ def throw_exception_first_time(*args, **kwargs): if throw_exception_first_time.counter > 1: pass # avoid throwing exception again else: - raise ShouldRestartException + raise socket.error throw_exception_first_time.counter = 0 diff --git a/postgres/tests/test_relations.py b/postgres/tests/test_relations.py index 908949d9029f5..4186786c78c5a 100644 --- a/postgres/tests/test_relations.py +++ b/postgres/tests/test_relations.py @@ -40,7 +40,7 @@ def test_relations_metrics(aggregator, pg_instance): pg_instance['relations'] = ['persons'] - posgres_check = PostgreSql('postgres', {}, {}) + posgres_check = PostgreSql('postgres', {}, [pg_instance]) posgres_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ @@ -79,7 +79,7 @@ def test_relations_metrics2(aggregator, pg_instance): {'relation_regex': r'[pP]ersons[-_]?(dup\d)?'}, ] relations = ['persons', 'personsdup1', 'Personsdup2'] - posgres_check = PostgreSql('postgres', {}, {}) + posgres_check = PostgreSql('postgres', {}, [pg_instance]) posgres_check.check(pg_instance) expected_tags = {} @@ -118,7 +118,7 @@ def test_index_metrics(aggregator, pg_instance): pg_instance['relations'] = ['breed'] pg_instance['dbname'] = 'dogs' - posgres_check = PostgreSql('postgres', {}, {}) + posgres_check = PostgreSql('postgres', {}, [pg_instance]) 
posgres_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 3b63ba2002172..dfd08cf2760dc 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -10,15 +10,13 @@ # Mark the entire module as tests of type `unit` pytestmark = pytest.mark.unit -KEY = ('localhost', '5432', 'dbname') - def test_get_instance_metrics_lt_92(check): """ check output when 9.2+ """ check._is_9_2_or_above.return_value = False - res = check._get_instance_metrics(KEY, 'dbname', False, False) + res = check._get_instance_metrics(False, False) assert res['metrics'] == util.COMMON_METRICS @@ -27,7 +25,7 @@ def test_get_instance_metrics_92(check): check output when <9.2 """ check._is_9_2_or_above.return_value = True - res = check._get_instance_metrics(KEY, 'dbname', False, False) + res = check._get_instance_metrics(False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) @@ -35,15 +33,15 @@ def test_get_instance_metrics_state(check): """ Ensure data is consistent when the function is called more than once """ - res = check._get_instance_metrics(KEY, 'dbname', False, False) + res = check._get_instance_metrics(False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) check._is_9_2_or_above.side_effect = Exception # metrics were cached so this shouldn't be called - res = check._get_instance_metrics(KEY, 'dbname', [], False) + res = check._get_instance_metrics([], False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) # also check what happens when `metrics` is not valid - check.instance_metrics[KEY] = [] - res = check._get_instance_metrics(KEY, 'dbname', False, False) + check.instance_metrics = [] + res = check._get_instance_metrics(False, False) assert res is None @@ -54,7 +52,7 @@ def test_get_instance_metrics_database_size_metrics(check): expected = util.COMMON_METRICS 
expected.update(util.NEWER_92_METRICS) expected.update(util.DATABASE_SIZE_METRICS) - res = check._get_instance_metrics(KEY, 'dbname', True, False) + res = check._get_instance_metrics(True, False) assert res['metrics'] == expected @@ -63,28 +61,14 @@ def test_get_instance_with_default(check): Test the contents of the query string with different `collect_default_db` values """ collect_default_db = False - res = check._get_instance_metrics(KEY, 'dbname', False, collect_default_db) + res = check._get_instance_metrics(False, collect_default_db) assert " AND psd.datname not ilike 'postgres'" in res['query'] collect_default_db = True - res = check._get_instance_metrics(KEY, 'dbname', False, collect_default_db) + res = check._get_instance_metrics(False, collect_default_db) assert " AND psd.datname not ilike 'postgres'" not in res['query'] -def test_get_instance_metrics_instance(check): - """ - Test the caching system preventing instance metrics to be collected more than - once when two instances are configured for the same server but different databases - """ - res = check._get_instance_metrics(KEY, 'dbname', False, False) - assert res is not None - # 2nd round, same host/port combo: we shouldn't collect anything - another = ('localhost', '5432', 'FOO') - res = check._get_instance_metrics(another, 'dbname', False, False) - assert res is None - assert check.instance_metrics[another] == [] - - def test_get_version(check): """ Test _get_version() to make sure the check is properly parsing Postgres versions @@ -93,23 +77,27 @@ def test_get_version(check): # Test #.#.# style versions db.cursor().fetchone.return_value = ['9.5.3'] - assert check._get_version('regular_version', db) == [9, 5, 3] + assert check._get_version(db) == [9, 5, 3] # Test #.# style versions db.cursor().fetchone.return_value = ['10.2'] - assert check._get_version('short_version', db) == [10, 2] + check._clean_state() + assert check._get_version(db) == [10, 2] # Test #beta# style versions 
db.cursor().fetchone.return_value = ['11beta3'] - assert check._get_version('beta_version', db) == [11, -1, 3] + check._clean_state() + assert check._get_version(db) == [11, -1, 3] # Test #rc# style versions db.cursor().fetchone.return_value = ['11rc1'] - assert check._get_version('rc_version', db) == [11, -1, 1] + check._clean_state() + assert check._get_version(db) == [11, -1, 1] # Test #unknown# style versions db.cursor().fetchone.return_value = ['11nightly3'] - assert check._get_version('unknown_version', db) == [11, -1, 3] + check._clean_state() + assert check._get_version(db) == [11, -1, 3] def test_is_above(check): @@ -120,40 +108,45 @@ def test_is_above(check): # Test major versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above('smaller major', db, [9, 5, 4]) - assert check._is_above('larger major', db, [11, 0, 0]) is False + assert check._is_above(db, [9, 5, 4]) + assert check._is_above(db, [11, 0, 0]) is False # Test minor versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above('smaller minor', db, [10, 4, 4]) - assert check._is_above('larger minor', db, [10, 6, 4]) is False + assert check._is_above(db, [10, 4, 4]) + assert check._is_above(db, [10, 6, 4]) is False # Test patch versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above('smaller patch', db, [10, 5, 3]) - assert check._is_above('larger patch', db, [10, 5, 5]) is False + assert check._is_above(db, [10, 5, 3]) + assert check._is_above(db, [10, 5, 5]) is False # Test same version, _is_above() returns True for greater than or equal to db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above('same_version', db, [10, 5, 4]) + assert check._is_above(db, [10, 5, 4]) # Test beta version above db.cursor().fetchone.return_value = ['11beta4'] - assert check._is_above('newer_beta_version', db, [11, -1, 3]) + check._clean_state() + check._clean_state() + assert check._is_above(db, [11, -1, 3]) # Test beta version 
against official version db.cursor().fetchone.return_value = ['11.0.0'] - assert check._is_above('official_release', db, [11, -1, 3]) + check._clean_state() + assert check._is_above(db, [11, -1, 3]) # Test versions of unequal length db.cursor().fetchone.return_value = ['10.0'] - assert check._is_above('unequal_length', db, [10, 0]) - assert check._is_above('unequal_length', db, [10, 0, 0]) - assert check._is_above('unequal_length', db, [10, 0, 1]) is False + check._clean_state() + assert check._is_above(db, [10, 0]) + assert check._is_above(db, [10, 0, 0]) + assert check._is_above(db, [10, 0, 1]) is False # Test return value is not a list db.cursor().fetchone.return_value = "foo" - assert check._is_above('smth not a list', db, [10, 0]) is False + check._clean_state() + assert check._is_above(db, [10, 0]) is False def test_malformed_get_custom_queries(check): From ffe81db3feafc59d9f3f04fe952770c4001b0988 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Fri, 18 Oct 2019 18:09:24 +0200 Subject: [PATCH 02/27] Fix some tests --- postgres/datadog_checks/postgres/postgres.py | 12 +-- postgres/tests/conftest.py | 9 ++- postgres/tests/test_custom_metrics.py | 4 +- postgres/tests/test_integration.py | 78 ++++++++++---------- 4 files changed, 55 insertions(+), 48 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 2352396e1fe6a..03e555a937e11 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -64,14 +64,14 @@ class PostgreSql(AgentCheck): _known_servers = set() _known_servers_lock = threading.Lock() - def __init__(self, name, init_config, instances=None): + def __init__(self, name, init_config, instances): AgentCheck.__init__(self, name, init_config, instances) self._clean_state() self.db = None - self.custom_metrics = {} + self.custom_metrics = None # Deprecate custom_metrics in favor of custom_queries 
- if instances is not None and any('custom_metrics' in instance for instance in instances): + if any('custom_metrics' in instance for instance in instances): self.warning( "DEPRECATION NOTICE: Please use the new custom_queries option " "rather than the now deprecated custom_metrics" @@ -284,7 +284,7 @@ def _get_bgw_metrics(self, db): self.db_bgw_metrics.append(sub_key) self.bgw_metrics = dict(COMMON_BGW_METRICS) - if self._is_9_1_or_abovedb(): + if self._is_9_1_or_above(db): self.bgw_metrics.update(NEWER_91_BGW_METRICS) if self._is_9_2_or_above(db): self.bgw_metrics.update(NEWER_92_BGW_METRICS) @@ -315,9 +315,9 @@ def _get_archiver_metrics(self, db): """ # While there's only one set for now, prepare for future additions to # the table, mirroring _get_bgw_metrics() - metrics = self.archiver_metrics.get() + metrics = self.archiver_metrics - if self._is_9_4_or_above(db) and metrics is None: + if metrics is None and self._is_9_4_or_above(db): # Collect from only one instance. See _get_bgw_metrics() for details on why. 
sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index 14ae863dd2f54..b1a81675213ec 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -30,7 +30,14 @@ def dd_environment(e2e_instance): @pytest.fixture def check(): - c = PostgreSql('postgres', {}, [{'dbname': 'dbname', 'host':'localhost', 'port': '5432'}]) + c = PostgreSql('postgres', {}, [{'dbname': 'dbname', 'host': 'localhost', 'port': '5432'}]) + c._is_9_2_or_above = mock.MagicMock() + PostgreSql._known_servers = set() # reset the global state + return c + +@pytest.fixture +def integration_check(): + c = PostgreSql('postgres', {}, [{'host': HOST, 'port': PORT, 'username': USER, 'password': PASSWORD, 'dbname': DB_NAME, 'tags': ['foo:bar']}]) c._is_9_2_or_above = mock.MagicMock() PostgreSql._known_servers = set() # reset the global state return c diff --git a/postgres/tests/test_custom_metrics.py b/postgres/tests/test_custom_metrics.py index 06c2401b76f80..7871a9508efdc 100644 --- a/postgres/tests/test_custom_metrics.py +++ b/postgres/tests/test_custom_metrics.py @@ -24,7 +24,7 @@ def test_custom_metrics(aggregator, pg_instance): ], } ) - postgres_check = PostgreSql('postgres', {}, []) + postgres_check = PostgreSql('postgres', {}, [pg_instance]) postgres_check.check(pg_instance) tags = ['customdb:a', 'server:{}'.format(pg_instance['host']), 'port:{}'.format(pg_instance['port'])] @@ -54,7 +54,7 @@ def test_custom_queries(aggregator, pg_instance): ] } ) - postgres_check = PostgreSql('postgres', {}, []) + postgres_check = PostgreSql('postgres', {}, [pg_instance]) postgres_check.check(pg_instance) tags = [ 'db:{}'.format(pg_instance['dbname']), diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 040c3915d9d21..229385662a912 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -23,8 +23,8 @@ @pytest.mark.integration 
@pytest.mark.usefixtures('dd_environment') -def test_common_metrics(aggregator, check, pg_instance): - check.check(pg_instance) +def test_common_metrics(aggregator, integration_check, pg_instance): + integration_check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT)] check_bgw_metrics(aggregator, expected_tags) @@ -35,30 +35,30 @@ def test_common_metrics(aggregator, check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_common_metrics_without_size(aggregator, check, pg_instance): +def test_common_metrics_without_size(aggregator, integration_check, pg_instance): pg_instance['collect_database_size_metrics'] = False - check.check(pg_instance) + integration_check.check(pg_instance) assert 'postgresql.database_size' not in aggregator.metric_names @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_can_connect_service_check(aggregator, check, pg_instance): +def test_can_connect_service_check(aggregator, integration_check, pg_instance): expected_tags = pg_instance['tags'] + [ 'host:{}'.format(HOST), 'server:{}'.format(HOST), 'port:{}'.format(PORT), 'db:{}'.format(DB_NAME), ] - check.check(pg_instance) + integration_check.check(pg_instance) aggregator.assert_service_check('postgres.can_connect', count=1, status=PostgreSql.OK, tags=expected_tags) @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_schema_metrics(aggregator, check, pg_instance): +def test_schema_metrics(aggregator, integration_check, pg_instance): pg_instance['table_count_limit'] = 1 - check.check(pg_instance) + integration_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ 'db:{}'.format(DB_NAME), @@ -72,8 +72,8 @@ def test_schema_metrics(aggregator, check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_connections_metrics(aggregator, check, pg_instance): - check.check(pg_instance) +def 
test_connections_metrics(aggregator, integration_check, pg_instance): + integration_check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT)] for name in CONNECTION_METRICS: @@ -84,11 +84,11 @@ def test_connections_metrics(aggregator, check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_locks_metrics(aggregator, check, pg_instance): +def test_locks_metrics(aggregator, integration_check, pg_instance): with psycopg2.connect(host=HOST, dbname=DB_NAME, user="postgres") as conn: with conn.cursor() as cur: cur.execute('LOCK persons') - check.check(pg_instance) + integration_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ 'server:{}'.format(HOST), @@ -103,9 +103,9 @@ def test_locks_metrics(aggregator, check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_activity_metrics(aggregator, check, pg_instance): +def test_activity_metrics(aggregator, integration_check, pg_instance): pg_instance['collect_activity_metrics'] = True - check.check(pg_instance) + integration_check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT), 'db:datadog_test'] for name in ACTIVITY_METRICS: @@ -115,26 +115,26 @@ def test_activity_metrics(aggregator, check, pg_instance): @requires_over_10 @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_wrong_version(aggregator, check, pg_instance): +def test_wrong_version(aggregator, integration_check, pg_instance): # Enforce to cache wrong version db_key = ('localhost', 5432, 'datadog_test') - check.versions[db_key] = [9, 6, 0] + integration_check.version = [9, 6, 0] - check.check(pg_instance) - assert_state_clean(check, db_key) + integration_check.check(pg_instance) + assert_state_clean(integration_check) - check.check(pg_instance) - assert check.versions[db_key][0] == int(POSTGRES_VERSION) - assert_state_set(check, 
db_key) + integration_check.check(pg_instance) + assert integration_check.version[0] == int(POSTGRES_VERSION) + assert_state_set(integration_check) @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_state_clears_on_connection_error(check, pg_instance): +def test_state_clears_on_connection_error(integration_check, pg_instance): db_key = ('localhost', 5432, 'datadog_test') - check.check(pg_instance) - assert_state_set(check, db_key) + integration_check.check(pg_instance) + assert_state_set(integration_check, db_key) def throw_exception_first_time(*args, **kwargs): throw_exception_first_time.counter += 1 @@ -146,27 +146,27 @@ def throw_exception_first_time(*args, **kwargs): throw_exception_first_time.counter = 0 with mock.patch('datadog_checks.postgres.PostgreSql._collect_stats', side_effect=throw_exception_first_time): - check.check(pg_instance) - assert_state_clean(check, db_key) + integration_check.check(pg_instance) + assert_state_clean(integration_check, db_key) -def assert_state_clean(check, db_key): - assert db_key not in check.versions - assert db_key not in check.instance_metrics - assert db_key not in check.bgw_metrics - assert db_key not in check.archiver_metrics +def assert_state_clean(check): + assert check.version is None + assert check.instance_metrics is None + assert check.bgw_metrics is None + assert check.archiver_metrics is None assert check.db_bgw_metrics == [] assert check.db_archiver_metrics == [] - assert db_key not in check.replication_metrics - assert db_key not in check.activity_metrics + assert check.replication_metrics is None + assert check.activity_metrics is None -def assert_state_set(check, db_key): - assert db_key in check.versions - assert db_key in check.instance_metrics - assert db_key in check.bgw_metrics +def assert_state_set(check,): + assert check.versions + assert check.instance_metrics + assert check.bgw_metrics if POSTGRES_VERSION != '9.3': - assert db_key in check.archiver_metrics + assert 
check.archiver_metrics assert check.db_archiver_metrics != [] assert check.db_bgw_metrics != [] - assert db_key in check.replication_metrics + assert check.replication_metrics From 294602f1dc34dc6a0313ed749752d4d168ba5a49 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 11:45:33 +0200 Subject: [PATCH 03/27] Remove known hosts --- postgres/datadog_checks/postgres/postgres.py | 37 --------------- postgres/tests/conftest.py | 10 ++-- postgres/tests/test_integration.py | 48 +++++++++++--------- postgres/tests/test_relations.py | 12 ++--- postgres/tests/test_unit.py | 5 -- 5 files changed, 39 insertions(+), 73 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 03e555a937e11..8b04c165b1593 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -60,9 +60,6 @@ class PostgreSql(AgentCheck): MONOTONIC = AgentCheck.monotonic_count SERVICE_CHECK_NAME = 'postgres.can_connect' - # keep track of host/port present in any configured instance - _known_servers = set() - _known_servers_lock = threading.Lock() def __init__(self, name, init_config, instances): AgentCheck.__init__(self, name, init_config, instances) @@ -117,21 +114,6 @@ def _clean_state(self): self.replication_metrics = None self.activity_metrics = None - @classmethod - def _server_known(cls, host, port): - """ - Return whether the hostname and port combination was already seen - """ - with PostgreSql._known_servers_lock: - return (host, port) in PostgreSql._known_servers - - @classmethod - def _set_server_known(cls, host, port): - """ - Store the host/port combination for this server - """ - with PostgreSql._known_servers_lock: - PostgreSql._known_servers.add((host, port)) def _get_replication_role(self, db): cursor = db.cursor() @@ -212,20 +194,6 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): metrics = 
self.instance_metrics if metrics is None: - host, port, dbname = self.key - # check whether we already collected server-wide metrics for this - # postgres instance - if self._server_known(host, port): - # explicitly set instance metrics for this key to an empty list - # so we don't get here more than once - self.instance_metrics = [] - self.log.debug( - "Not collecting instance metrics for key: {} as " - "they are already collected by another instance".format(self.key) - ) - return None - self._set_server_known(host, port) - # select the right set of metrics to collect depending on postgres version if self._is_9_2_or_above(): self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) @@ -238,11 +206,6 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): metrics = self.instance_metrics - # this will happen when the current key contains a postgres server that - # we already know, let's avoid to collect duplicates - if not metrics: - return None - res = { 'descriptors': [('psd.datname', 'db')], 'metrics': metrics, diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index b1a81675213ec..dd826effef361 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -35,12 +35,14 @@ def check(): PostgreSql._known_servers = set() # reset the global state return c + @pytest.fixture def integration_check(): - c = PostgreSql('postgres', {}, [{'host': HOST, 'port': PORT, 'username': USER, 'password': PASSWORD, 'dbname': DB_NAME, 'tags': ['foo:bar']}]) - c._is_9_2_or_above = mock.MagicMock() - PostgreSql._known_servers = set() # reset the global state - return c + def _check(instance): + c = PostgreSql('postgres', {}, [instance]) + c._is_9_2_or_above = mock.MagicMock() + return c + return _check @pytest.fixture diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 229385662a912..b98da67590090 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py 
@@ -24,7 +24,8 @@ @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_common_metrics(aggregator, integration_check, pg_instance): - integration_check.check(pg_instance) + check = integration_check(pg_instance) + check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT)] check_bgw_metrics(aggregator, expected_tags) @@ -37,20 +38,22 @@ def test_common_metrics(aggregator, integration_check, pg_instance): @pytest.mark.usefixtures('dd_environment') def test_common_metrics_without_size(aggregator, integration_check, pg_instance): pg_instance['collect_database_size_metrics'] = False - integration_check.check(pg_instance) + check = integration_check(pg_instance) + check.check(pg_instance) assert 'postgresql.database_size' not in aggregator.metric_names @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_can_connect_service_check(aggregator, integration_check, pg_instance): + check = integration_check(pg_instance) expected_tags = pg_instance['tags'] + [ 'host:{}'.format(HOST), 'server:{}'.format(HOST), 'port:{}'.format(PORT), 'db:{}'.format(DB_NAME), ] - integration_check.check(pg_instance) + check.check(pg_instance) aggregator.assert_service_check('postgres.can_connect', count=1, status=PostgreSql.OK, tags=expected_tags) @@ -58,7 +61,8 @@ def test_can_connect_service_check(aggregator, integration_check, pg_instance): @pytest.mark.usefixtures('dd_environment') def test_schema_metrics(aggregator, integration_check, pg_instance): pg_instance['table_count_limit'] = 1 - integration_check.check(pg_instance) + check = integration_check(pg_instance) + check.check(pg_instance) expected_tags = pg_instance['tags'] + [ 'db:{}'.format(DB_NAME), @@ -73,7 +77,8 @@ def test_schema_metrics(aggregator, integration_check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_connections_metrics(aggregator, integration_check, pg_instance): - 
integration_check.check(pg_instance) + check = integration_check(pg_instance) + check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT)] for name in CONNECTION_METRICS: @@ -85,10 +90,11 @@ def test_connections_metrics(aggregator, integration_check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_locks_metrics(aggregator, integration_check, pg_instance): + check = integration_check(pg_instance) with psycopg2.connect(host=HOST, dbname=DB_NAME, user="postgres") as conn: with conn.cursor() as cur: cur.execute('LOCK persons') - integration_check.check(pg_instance) + check.check(pg_instance) expected_tags = pg_instance['tags'] + [ 'server:{}'.format(HOST), @@ -105,7 +111,8 @@ def test_locks_metrics(aggregator, integration_check, pg_instance): @pytest.mark.usefixtures('dd_environment') def test_activity_metrics(aggregator, integration_check, pg_instance): pg_instance['collect_activity_metrics'] = True - integration_check.check(pg_instance) + check = integration_check(pg_instance) + check.check(pg_instance) expected_tags = pg_instance['tags'] + ['server:{}'.format(HOST), 'port:{}'.format(PORT), 'db:datadog_test'] for name in ACTIVITY_METRICS: @@ -116,25 +123,24 @@ def test_activity_metrics(aggregator, integration_check, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_wrong_version(aggregator, integration_check, pg_instance): + check = integration_check(pg_instance) # Enforce to cache wrong version - db_key = ('localhost', 5432, 'datadog_test') - integration_check.version = [9, 6, 0] + check.version = [9, 6, 0] - integration_check.check(pg_instance) - assert_state_clean(integration_check) + check.check(pg_instance) + assert_state_clean(check) - integration_check.check(pg_instance) - assert integration_check.version[0] == int(POSTGRES_VERSION) - assert_state_set(integration_check) + check.check(pg_instance) + assert check.version[0] 
== int(POSTGRES_VERSION) + assert_state_set(check) @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_state_clears_on_connection_error(integration_check, pg_instance): - db_key = ('localhost', 5432, 'datadog_test') - - integration_check.check(pg_instance) - assert_state_set(integration_check, db_key) + check = integration_check(pg_instance) + check.check(pg_instance) + assert_state_set(check) def throw_exception_first_time(*args, **kwargs): throw_exception_first_time.counter += 1 @@ -146,8 +152,8 @@ def throw_exception_first_time(*args, **kwargs): throw_exception_first_time.counter = 0 with mock.patch('datadog_checks.postgres.PostgreSql._collect_stats', side_effect=throw_exception_first_time): - integration_check.check(pg_instance) - assert_state_clean(integration_check, db_key) + check.check(pg_instance) + assert_state_clean(check) def assert_state_clean(check): @@ -162,7 +168,7 @@ def assert_state_clean(check): def assert_state_set(check,): - assert check.versions + assert check.version assert check.instance_metrics assert check.bgw_metrics if POSTGRES_VERSION != '9.3': diff --git a/postgres/tests/test_relations.py b/postgres/tests/test_relations.py index 4186786c78c5a..52bc7745c1b70 100644 --- a/postgres/tests/test_relations.py +++ b/postgres/tests/test_relations.py @@ -37,10 +37,10 @@ @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_relations_metrics(aggregator, pg_instance): +def test_relations_metrics(aggregator, integration_check, pg_instance): pg_instance['relations'] = ['persons'] - posgres_check = PostgreSql('postgres', {}, [pg_instance]) + posgres_check = integration_check(pg_instance) posgres_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ @@ -72,14 +72,14 @@ def test_relations_metrics(aggregator, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_relations_metrics2(aggregator, pg_instance): +def test_relations_metrics2(aggregator, 
integration_check, pg_instance): pg_instance['relations'] = [ {'relation_regex': '.*', 'schemas': ['hello', 'hello2']}, # Empty schemas means all schemas, even though the first relation matches first. {'relation_regex': r'[pP]ersons[-_]?(dup\d)?'}, ] relations = ['persons', 'personsdup1', 'Personsdup2'] - posgres_check = PostgreSql('postgres', {}, [pg_instance]) + posgres_check = integration_check(pg_instance) posgres_check.check(pg_instance) expected_tags = {} @@ -114,11 +114,11 @@ def test_relations_metrics2(aggregator, pg_instance): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_index_metrics(aggregator, pg_instance): +def test_index_metrics(aggregator, integration_check, pg_instance): pg_instance['relations'] = ['breed'] pg_instance['dbname'] = 'dogs' - posgres_check = PostgreSql('postgres', {}, [pg_instance]) + posgres_check = integration_check(pg_instance) posgres_check.check(pg_instance) expected_tags = pg_instance['tags'] + [ diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index dfd08cf2760dc..9c9e30b7008c6 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -39,11 +39,6 @@ def test_get_instance_metrics_state(check): res = check._get_instance_metrics([], False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) - # also check what happens when `metrics` is not valid - check.instance_metrics = [] - res = check._get_instance_metrics(False, False) - assert res is None - def test_get_instance_metrics_database_size_metrics(check): """ From 4e3c3157b7fc169b7bbe1700a377a59bf047bdb1 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 11:54:38 +0200 Subject: [PATCH 04/27] Do not modify constants --- postgres/datadog_checks/postgres/postgres.py | 22 +++++++------------- postgres/tests/conftest.py | 1 + postgres/tests/test_integration.py | 3 ++- postgres/tests/test_relations.py | 2 -- 4 files changed, 11 
insertions(+), 17 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 8b04c165b1593..d6dfe1f55923d 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -1,9 +1,9 @@ # (C) Datadog, Inc. 2019 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) +import copy import re import socket -import threading from contextlib import closing import psycopg2 @@ -60,7 +60,6 @@ class PostgreSql(AgentCheck): MONOTONIC = AgentCheck.monotonic_count SERVICE_CHECK_NAME = 'postgres.can_connect' - def __init__(self, name, init_config, instances): AgentCheck.__init__(self, name, init_config, instances) self._clean_state() @@ -114,7 +113,6 @@ def _clean_state(self): self.replication_metrics = None self.activity_metrics = None - def _get_replication_role(self, db): cursor = db.cursor() cursor.execute('SELECT pg_is_in_recovery();') @@ -331,7 +329,7 @@ def _get_activity_metrics(self, db, user): metrics_data = self.activity_metrics if metrics_data is None: - query = ACTIVITY_QUERY_10 if self._is_10_or_above( db) else ACTIVITY_QUERY_LT_10 + query = ACTIVITY_QUERY_10 if self._is_10_or_above(db) else ACTIVITY_QUERY_LT_10 if self._is_9_6_or_above(db): metrics_query = ACTIVITY_METRICS_9_6 elif self._is_9_2_or_above(db): @@ -537,18 +535,14 @@ def _collect_stats( replication_metrics = self._get_replication_metrics(db) if replication_metrics is not None: - # FIXME: constants shouldn't be modified - REPLICATION_METRICS['metrics'] = replication_metrics - metric_scope.append(REPLICATION_METRICS) + replication_metrics_query = copy.deepcopy(REPLICATION_METRICS) + replication_metrics_query['metrics'] = replication_metrics + metric_scope.append(replication_metrics_query) cursor = db.cursor() - results_len = self._query_scope( - cursor, db_instance_metrics, db, instance_tags, False, relations_config - ) + results_len = self._query_scope(cursor, 
db_instance_metrics, db, instance_tags, False, relations_config) if results_len is not None: - self.gauge( - "postgresql.db.count", results_len, tags=[t for t in instance_tags if not t.startswith("db:")] - ) + self.gauge("postgresql.db.count", results_len, tags=[t for t in instance_tags if not t.startswith("db:")]) self._query_scope(cursor, bgw_instance_metrics, db, instance_tags, False, relations_config) self._query_scope(cursor, archiver_instance_metrics, db, instance_tags, False, relations_config) @@ -801,7 +795,7 @@ def check(self, instance): collect_default_db, ) self._get_custom_queries(db, tags, custom_queries) - except (psycopg2.InterfaceError, socket.error) as e: + except (psycopg2.InterfaceError, socket.error): self.log.info("Connection error, will retry on next agent run") self._clean_state() diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index dd826effef361..e9f4081dc5ba5 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -42,6 +42,7 @@ def _check(instance): c = PostgreSql('postgres', {}, [instance]) c._is_9_2_or_above = mock.MagicMock() return c + return _check diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index b98da67590090..39529f0cad1fe 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -1,10 +1,11 @@ # (C) Datadog, Inc. 
2010-2018 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) +import socket + import mock import psycopg2 import pytest -import socket from datadog_checks.postgres import PostgreSql diff --git a/postgres/tests/test_relations.py b/postgres/tests/test_relations.py index 52bc7745c1b70..1701027f0358c 100644 --- a/postgres/tests/test_relations.py +++ b/postgres/tests/test_relations.py @@ -3,8 +3,6 @@ # Licensed under Simplified BSD License (see LICENSE) import pytest -from datadog_checks.postgres import PostgreSql - RELATION_METRICS = [ 'postgresql.seq_scans', 'postgresql.seq_rows_read', From 755c960007cfefcc88d5acf43a6248d97fa5d98b Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 12:10:58 +0200 Subject: [PATCH 05/27] Do not iterate on instances on constructor --- postgres/datadog_checks/postgres/postgres.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index d6dfe1f55923d..d3c6b7d7c8aeb 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -67,7 +67,7 @@ def __init__(self, name, init_config, instances): self.custom_metrics = None # Deprecate custom_metrics in favor of custom_queries - if any('custom_metrics' in instance for instance in instances): + if 'custom_metrics' in self.instance: self.warning( "DEPRECATION NOTICE: Please use the new custom_queries option " "rather than the now deprecated custom_metrics" From dd10d0e10fb0f98c021613cb22f60f8746cfc054 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 12:29:02 +0200 Subject: [PATCH 06/27] DB is always cached, do not pass around --- postgres/datadog_checks/postgres/postgres.py | 120 +++++++++---------- postgres/tests/test_unit.py | 59 ++++----- 2 files changed, 88 insertions(+), 91 deletions(-) diff --git 
a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index d3c6b7d7c8aeb..ac360fb9da47d 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -113,16 +113,16 @@ def _clean_state(self): self.replication_metrics = None self.activity_metrics = None - def _get_replication_role(self, db): - cursor = db.cursor() + def _get_replication_role(self): + cursor = self.db.cursor() cursor.execute('SELECT pg_is_in_recovery();') role = cursor.fetchone()[0] # value fetched for role is of return "standby" if role else "master" - def _get_version(self, db): + def _get_version(self): if self.version is None: - cursor = db.cursor() + cursor = self.db.cursor() cursor.execute('SHOW SERVER_VERSION;') version = cursor.fetchone()[0] try: @@ -145,8 +145,8 @@ def _get_version(self, db): self.service_metadata('version', self.version) return self.version - def _is_above(self, db, version_to_compare): - version = self._get_version(db) + def _is_above(self, version_to_compare): + version = self._get_version() if type(version) == list: # iterate from major down to bugfix for v, vc in zip_longest(version, version_to_compare, fillvalue=0): @@ -159,23 +159,23 @@ def _is_above(self, db, version_to_compare): return True return False - def _is_8_3_or_above(self, db): - return self._is_above(db, [8, 3, 0]) + def _is_8_3_or_above(self): + return self._is_above([8, 3, 0]) - def _is_9_1_or_above(self, db): - return self._is_above(db, [9, 1, 0]) + def _is_9_1_or_above(self): + return self._is_above([9, 1, 0]) - def _is_9_2_or_above(self, db): - return self._is_above(db, [9, 2, 0]) + def _is_9_2_or_above(self): + return self._is_above([9, 2, 0]) - def _is_9_4_or_above(self, db): - return self._is_above(db, [9, 4, 0]) + def _is_9_4_or_above(self): + return self._is_above([9, 4, 0]) - def _is_9_6_or_above(self, db): - return self._is_above(db, [9, 6, 0]) + def _is_9_6_or_above(self): + return self._is_above([9, 6, 
0]) - def _is_10_or_above(self, db): - return self._is_above(db, [10, 0, 0]) + def _is_10_or_above(self): + return self._is_above([10, 0, 0]) def _get_instance_metrics(self, database_size_metrics, collect_default_db): """ @@ -221,7 +221,7 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): return res - def _get_bgw_metrics(self, db): + def _get_bgw_metrics(self): """Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS depending on the postgres version. Uses a dictionary to save the result for each instance @@ -245,9 +245,9 @@ def _get_bgw_metrics(self, db): self.db_bgw_metrics.append(sub_key) self.bgw_metrics = dict(COMMON_BGW_METRICS) - if self._is_9_1_or_above(db): + if self._is_9_1_or_above(): self.bgw_metrics.update(NEWER_91_BGW_METRICS) - if self._is_9_2_or_above(db): + if self._is_9_2_or_above(): self.bgw_metrics.update(NEWER_92_BGW_METRICS) metrics = self.bgw_metrics @@ -269,7 +269,7 @@ def _get_count_metrics(self, table_count_limit): ) return metrics - def _get_archiver_metrics(self, db): + def _get_archiver_metrics(self): """Use COMMON_ARCHIVER_METRICS to read from pg_stat_archiver as defined in 9.4 (first version to have this table). Uses a dictionary to save the result for each instance @@ -278,7 +278,7 @@ def _get_archiver_metrics(self, db): # the table, mirroring _get_bgw_metrics() metrics = self.archiver_metrics - if metrics is None and self._is_9_4_or_above(db): + if metrics is None and self._is_9_4_or_above(): # Collect from only one instance. See _get_bgw_metrics() for details on why. 
sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: @@ -311,17 +311,17 @@ def _get_replication_metrics(self, db): Uses a dictionnary to save the result for each instance """ metrics = self.replication_metrics - if self._is_10_or_above(db) and metrics is None: + if self._is_10_or_above() and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_10) metrics = self.replication_metrics - elif self._is_9_1_or_above(db) and metrics is None: + elif self._is_9_1_or_above() and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_9_1) - if self._is_9_2_or_above(db): + if self._is_9_2_or_above(): self.replication_metrics.update(REPLICATION_METRICS_9_2) metrics = self.replication_metrics return metrics - def _get_activity_metrics(self, db, user): + def _get_activity_metrics(self, user): """ Use ACTIVITY_METRICS_LT_8_3 or ACTIVITY_METRICS_8_3 or ACTIVITY_METRICS_9_2 depending on the postgres version in conjunction with ACTIVITY_QUERY_10 or ACTIVITY_QUERY_LT_10. 
Uses a dictionnary to save the result for each instance @@ -329,12 +329,12 @@ def _get_activity_metrics(self, db, user): metrics_data = self.activity_metrics if metrics_data is None: - query = ACTIVITY_QUERY_10 if self._is_10_or_above(db) else ACTIVITY_QUERY_LT_10 - if self._is_9_6_or_above(db): + query = ACTIVITY_QUERY_10 if self._is_10_or_above() else ACTIVITY_QUERY_LT_10 + if self._is_9_6_or_above(): metrics_query = ACTIVITY_METRICS_9_6 - elif self._is_9_2_or_above(db): + elif self._is_9_2_or_above(): metrics_query = ACTIVITY_METRICS_9_2 - elif self._is_8_3_or_above(db): + elif self._is_8_3_or_above(): metrics_query = ACTIVITY_METRICS_8_3 else: metrics_query = ACTIVITY_METRICS_LT_8_3 @@ -382,11 +382,11 @@ def _build_relations_config(self, yamlconfig): self.log.warning('Unhandled relations config type: {}'.format(element)) return config - def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, relations_config): + def _query_scope(self, cursor, scope, instance_tags, is_custom_metrics, relations_config): if scope is None: return None - if scope == REPLICATION_METRICS or not self._is_above(db, [9, 0, 0]): + if scope == REPLICATION_METRICS or not self._is_above([9, 0, 0]): log_func = self.log.debug else: log_func = self.log.warning @@ -417,10 +417,10 @@ def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, rela "A reattempt to identify the right version will happen on next agent run." 
% self.version ) self._clean_state() - db.rollback() + self.db.rollback() except (psycopg2.ProgrammingError, psycopg2.errors.QueryCanceled) as e: log_func("Not all metrics may be available: %s" % str(e)) - db.rollback() + self.db.rollback() if not results: return None @@ -498,7 +498,6 @@ def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, rela def _collect_stats( self, - db, user, instance_tags, relations, @@ -539,20 +538,20 @@ def _collect_stats( replication_metrics_query['metrics'] = replication_metrics metric_scope.append(replication_metrics_query) - cursor = db.cursor() - results_len = self._query_scope(cursor, db_instance_metrics, db, instance_tags, False, relations_config) + cursor = self.db.cursor() + results_len = self._query_scope(cursor, db_instance_metrics, instance_tags, False, relations_config) if results_len is not None: self.gauge("postgresql.db.count", results_len, tags=[t for t in instance_tags if not t.startswith("db:")]) - self._query_scope(cursor, bgw_instance_metrics, db, instance_tags, False, relations_config) - self._query_scope(cursor, archiver_instance_metrics, db, instance_tags, False, relations_config) + self._query_scope(cursor, bgw_instance_metrics, instance_tags, False, relations_config) + self._query_scope(cursor, archiver_instance_metrics, instance_tags, False, relations_config) if collect_activity_metrics: - activity_metrics = self._get_activity_metrics(db, user) - self._query_scope(cursor, activity_metrics, db, instance_tags, False, relations_config) + activity_metrics = self._get_activity_metrics(user) + self._query_scope(cursor, activity_metrics, instance_tags, False, relations_config) for scope in list(metric_scope) + custom_metrics: - self._query_scope(cursor, scope, db, instance_tags, scope in custom_metrics, relations_config) + self._query_scope(cursor, scope, instance_tags, scope in custom_metrics, relations_config) cursor.close() @@ -563,23 +562,21 @@ def _get_service_check_tags(cls, host, tags): 
service_check_tags = list(set(service_check_tags)) return service_check_tags - def get_connection(self, host, port, user, password, dbname, ssl, tags, use_cached=True): + def _connect(self, host, port, user, password, dbname, ssl, tags): """Get and memoize connections to instances""" - if self.db and use_cached: - conn = self.db - if conn.status != psycopg2.extensions.STATUS_READY: + if self.db: + if self.db.status != psycopg2.extensions.STATUS_READY: # Some transaction went wrong and the connection is in an unhealthy state. Let's fix that - conn.rollback() - return conn + self.db.rollback() elif host != "" and user != "": try: if host == 'localhost' and password == '': # Use ident method - connection = psycopg2.connect( + self.db = psycopg2.connect( "user=%s dbname=%s, application_name=%s" % (user, dbname, "datadog-agent") ) elif port != '': - connection = psycopg2.connect( + self.db = psycopg2.connect( host=host, port=port, user=user, @@ -589,7 +586,7 @@ def get_connection(self, host, port, user, password, dbname, ssl, tags, use_cach application_name="datadog-agent", ) else: - connection = psycopg2.connect( + self.db = psycopg2.connect( host=host, user=user, password=password, @@ -597,8 +594,6 @@ def get_connection(self, host, port, user, password, dbname, ssl, tags, use_cach sslmode=ssl, application_name="datadog-agent", ) - self.db = connection - return connection except Exception as e: message = u'Error establishing postgres connection: %s' % (str(e)) service_check_tags = self._get_service_check_tags(host, tags) @@ -612,7 +607,7 @@ def get_connection(self, host, port, user, password, dbname, ssl, tags, use_cach elif not user: raise ConfigurationError('Please specify a user to connect to Postgres as.') - def _get_custom_queries(self, db, tags, custom_queries): + def _get_custom_queries(self, tags, custom_queries): """ Given a list of custom_queries, execute each query and parse the result for metrics """ @@ -633,14 +628,14 @@ def _get_custom_queries(self, db, 
tags, custom_queries): self.log.error("custom query field `columns` is required for metric_prefix `{}`".format(metric_prefix)) continue - cursor = db.cursor() + cursor = self.db.cursor() with closing(cursor) as cursor: try: self.log.debug("Running query: {}".format(query)) cursor.execute(query) except (psycopg2.ProgrammingError, psycopg2.errors.QueryCanceled) as e: self.log.error("Error executing query for metric_prefix {}: {}".format(metric_prefix, str(e))) - db.rollback() + self.db.rollback() continue for row in cursor: @@ -776,13 +771,12 @@ def check(self, instance): db = None try: # Check version - db = self.get_connection(host, port, user, password, dbname, ssl, tags) + self._connect(host, port, user, password, dbname, ssl, tags) if tag_replication_role: - tags.extend(["replication_role:{}".format(self._get_replication_role(db))]) + tags.extend(["replication_role:{}".format(self._get_replication_role())]) version = self._get_version(db) self.log.debug("Running check against version %s" % version) self._collect_stats( - db, user, tags, self.relations, @@ -794,17 +788,17 @@ def check(self, instance): collect_database_size_metrics, collect_default_db, ) - self._get_custom_queries(db, tags, custom_queries) + self._get_custom_queries(tags, custom_queries) except (psycopg2.InterfaceError, socket.error): self.log.info("Connection error, will retry on next agent run") self._clean_state() - if db is not None: + if self.db is not None: service_check_tags = self._get_service_check_tags(host, tags) message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags, message=message) try: # commit to close the current query transaction - db.commit() + self.db.commit() except Exception as e: self.log.warning("Unable to commit: {0}".format(e)) diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 9c9e30b7008c6..bbfb9fc48cb01 100644 --- 
a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -69,30 +69,31 @@ def test_get_version(check): Test _get_version() to make sure the check is properly parsing Postgres versions """ db = MagicMock() + check.db = db # Test #.#.# style versions db.cursor().fetchone.return_value = ['9.5.3'] - assert check._get_version(db) == [9, 5, 3] + assert check._get_version() == [9, 5, 3] # Test #.# style versions db.cursor().fetchone.return_value = ['10.2'] check._clean_state() - assert check._get_version(db) == [10, 2] + assert check._get_version() == [10, 2] # Test #beta# style versions db.cursor().fetchone.return_value = ['11beta3'] check._clean_state() - assert check._get_version(db) == [11, -1, 3] + assert check._get_version() == [11, -1, 3] # Test #rc# style versions db.cursor().fetchone.return_value = ['11rc1'] check._clean_state() - assert check._get_version(db) == [11, -1, 1] + assert check._get_version() == [11, -1, 1] # Test #unknown# style versions db.cursor().fetchone.return_value = ['11nightly3'] check._clean_state() - assert check._get_version(db) == [11, -1, 3] + assert check._get_version() == [11, -1, 3] def test_is_above(check): @@ -100,48 +101,49 @@ def test_is_above(check): Test _is_above() to make sure the check is properly determining order of versions """ db = MagicMock() + check.db = db # Test major versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [9, 5, 4]) - assert check._is_above(db, [11, 0, 0]) is False + assert check._is_above([9, 5, 4]) + assert check._is_above([11, 0, 0]) is False # Test minor versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 4, 4]) - assert check._is_above(db, [10, 6, 4]) is False + assert check._is_above([10, 4, 4]) + assert check._is_above([10, 6, 4]) is False # Test patch versions db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 5, 3]) - assert check._is_above(db, [10, 5, 5]) is False + assert 
check._is_above([10, 5, 3]) + assert check._is_above([10, 5, 5]) is False # Test same version, _is_above() returns True for greater than or equal to db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 5, 4]) + assert check._is_above([10, 5, 4]) # Test beta version above db.cursor().fetchone.return_value = ['11beta4'] check._clean_state() check._clean_state() - assert check._is_above(db, [11, -1, 3]) + assert check._is_above([11, -1, 3]) # Test beta version against official version db.cursor().fetchone.return_value = ['11.0.0'] check._clean_state() - assert check._is_above(db, [11, -1, 3]) + assert check._is_above([11, -1, 3]) # Test versions of unequal length db.cursor().fetchone.return_value = ['10.0'] check._clean_state() - assert check._is_above(db, [10, 0]) - assert check._is_above(db, [10, 0, 0]) - assert check._is_above(db, [10, 0, 1]) is False + assert check._is_above([10, 0]) + assert check._is_above([10, 0, 0]) + assert check._is_above([10, 0, 1]) is False # Test return value is not a list db.cursor().fetchone.return_value = "foo" check._clean_state() - assert check._is_above(db, [10, 0]) is False + assert check._is_above([10, 0]) is False def test_malformed_get_custom_queries(check): @@ -150,17 +152,18 @@ def test_malformed_get_custom_queries(check): """ check.log = MagicMock() db = MagicMock() + check.db = db malformed_custom_query = {} # Make sure 'metric_prefix' is defined - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with("custom query field `metric_prefix` is required") check.log.reset_mock() # Make sure 'query' is defined malformed_custom_query['metric_prefix'] = 'postgresql' - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "custom query field `query` is required for metric_prefix 
`{}`".format(malformed_custom_query['metric_prefix']) ) @@ -168,7 +171,7 @@ def test_malformed_get_custom_queries(check): # Make sure 'columns' is defined malformed_custom_query['query'] = 'SELECT num FROM sometable' - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "custom query field `columns` is required for metric_prefix `{}`".format( malformed_custom_query['metric_prefix'] @@ -180,7 +183,7 @@ def test_malformed_get_custom_queries(check): malformed_custom_query_column = {} malformed_custom_query['columns'] = [malformed_custom_query_column] db.cursor().execute.side_effect = psycopg2.ProgrammingError - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "Error executing query for metric_prefix {}: ".format(malformed_custom_query['metric_prefix']) ) @@ -192,7 +195,7 @@ def test_malformed_get_custom_queries(check): query_return = ['num', 1337] db.cursor().execute.side_effect = None db.cursor().__iter__.return_value = iter([query_return]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "query result for metric_prefix {}: expected {} columns, got {}".format( malformed_custom_query['metric_prefix'], len(malformed_custom_query['columns']), len(query_return) @@ -202,7 +205,7 @@ def test_malformed_get_custom_queries(check): # Make sure the query does not return an empty result db.cursor().__iter__.return_value = iter([[]]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.debug.assert_called_with( "query result for metric_prefix {}: returned an empty result".format(malformed_custom_query['metric_prefix']) ) @@ -211,7 +214,7 @@ def 
test_malformed_get_custom_queries(check): # Make sure 'name' is defined in each column malformed_custom_query_column['some_key'] = 'some value' db.cursor().__iter__.return_value = iter([[1337]]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "column field `name` is required for metric_prefix `{}`".format(malformed_custom_query['metric_prefix']) ) @@ -220,7 +223,7 @@ def test_malformed_get_custom_queries(check): # Make sure 'type' is defined in each column malformed_custom_query_column['name'] = 'num' db.cursor().__iter__.return_value = iter([[1337]]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "column field `type` is required for column `{}` " "of metric_prefix `{}`".format(malformed_custom_query_column['name'], malformed_custom_query['metric_prefix']) @@ -230,7 +233,7 @@ def test_malformed_get_custom_queries(check): # Make sure 'type' is a valid metric type malformed_custom_query_column['type'] = 'invalid_type' db.cursor().__iter__.return_value = iter([[1337]]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "invalid submission method `{}` for column `{}` of " "metric_prefix `{}`".format( @@ -246,7 +249,7 @@ def test_malformed_get_custom_queries(check): query_return = MagicMock() query_return.__float__.side_effect = ValueError('Mocked exception') db.cursor().__iter__.return_value = iter([[query_return]]) - check._get_custom_queries(db, [], [malformed_custom_query]) + check._get_custom_queries([], [malformed_custom_query]) check.log.error.assert_called_once_with( "non-numeric value `{}` for metric column `{}` of " "metric_prefix `{}`".format( From c600eb67bf79c95286b78fd4e71184798aac4219 Mon Sep 17 00:00:00 2001 From: Julia 
Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 13:32:19 +0200 Subject: [PATCH 07/27] Extract version utils --- postgres/datadog_checks/postgres/postgres.py | 80 ++++-------------- .../datadog_checks/postgres/version_utils.py | 58 +++++++++++++ postgres/tests/test_unit.py | 81 ------------------- postgres/tests/test_version_utils.py | 79 ++++++++++++++++++ 4 files changed, 152 insertions(+), 146 deletions(-) create mode 100644 postgres/datadog_checks/postgres/version_utils.py create mode 100644 postgres/tests/test_version_utils.py diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index d3c6b7d7c8aeb..def023b236061 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -7,8 +7,9 @@ from contextlib import closing import psycopg2 +from .version_utils import get_version, is_above, is_9_2_or_above, is_9_1_or_above, is_10_or_above, is_9_6_or_above, \ + is_8_3_or_above from six import iteritems -from six.moves import zip_longest from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative @@ -122,61 +123,10 @@ def _get_replication_role(self, db): def _get_version(self, db): if self.version is None: - cursor = db.cursor() - cursor.execute('SHOW SERVER_VERSION;') - version = cursor.fetchone()[0] - try: - version_parts = version.split(' ')[0].split('.') - version = [int(part) for part in version_parts] - except Exception: - # Postgres might be in development, with format \d+[beta|rc]\d+ - match = re.match(r'(\d+)([a-zA-Z]+)(\d+)', version) - if match: - version_parts = list(match.groups()) - - # We found a valid development version - if len(version_parts) == 3: - # Replace development tag with a negative number to properly compare versions - version_parts[1] = -1 - version = [int(part) for part in version_parts] - - self.version = version - - self.service_metadata('version', self.version) + self.version = get_version(db) + 
self.service_metadata('version', [self.version['major'], self.version['minor'], self.version['patch']]) return self.version - def _is_above(self, db, version_to_compare): - version = self._get_version(db) - if type(version) == list: - # iterate from major down to bugfix - for v, vc in zip_longest(version, version_to_compare, fillvalue=0): - if v == vc: - continue - - return v > vc - - # return True if version is the same - return True - return False - - def _is_8_3_or_above(self, db): - return self._is_above(db, [8, 3, 0]) - - def _is_9_1_or_above(self, db): - return self._is_above(db, [9, 1, 0]) - - def _is_9_2_or_above(self, db): - return self._is_above(db, [9, 2, 0]) - - def _is_9_4_or_above(self, db): - return self._is_above(db, [9, 4, 0]) - - def _is_9_6_or_above(self, db): - return self._is_above(db, [9, 6, 0]) - - def _is_10_or_above(self, db): - return self._is_above(db, [10, 0, 0]) - def _get_instance_metrics(self, database_size_metrics, collect_default_db): """ Add NEWER_92_METRICS to the default set of COMMON_METRICS when server @@ -193,7 +143,7 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): if metrics is None: # select the right set of metrics to collect depending on postgres version - if self._is_9_2_or_above(): + if is_9_2_or_above(self.get_version()): self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) else: self.instance_metrics = dict(COMMON_METRICS) @@ -245,9 +195,9 @@ def _get_bgw_metrics(self, db): self.db_bgw_metrics.append(sub_key) self.bgw_metrics = dict(COMMON_BGW_METRICS) - if self._is_9_1_or_above(db): + if is_9_1_or_above(self._get_version()): self.bgw_metrics.update(NEWER_91_BGW_METRICS) - if self._is_9_2_or_above(db): + if _is_9_2_or_above(self._get_version()): self.bgw_metrics.update(NEWER_92_BGW_METRICS) metrics = self.bgw_metrics @@ -278,7 +228,7 @@ def _get_archiver_metrics(self, db): # the table, mirroring _get_bgw_metrics() metrics = self.archiver_metrics - if metrics is None and 
self._is_9_4_or_above(db): + if metrics is None and is_9_4_or_above(self._get_version()): # Collect from only one instance. See _get_bgw_metrics() for details on why. sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: @@ -311,12 +261,12 @@ def _get_replication_metrics(self, db): Uses a dictionnary to save the result for each instance """ metrics = self.replication_metrics - if self._is_10_or_above(db) and metrics is None: + if is_10_or_above(self.get_version()) and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_10) metrics = self.replication_metrics - elif self._is_9_1_or_above(db) and metrics is None: + elif is_9_1_or_above(self._get_version()) and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_9_1) - if self._is_9_2_or_above(db): + if is_9_2_or_above(self._get_version()): self.replication_metrics.update(REPLICATION_METRICS_9_2) metrics = self.replication_metrics return metrics @@ -330,11 +280,11 @@ def _get_activity_metrics(self, db, user): if metrics_data is None: query = ACTIVITY_QUERY_10 if self._is_10_or_above(db) else ACTIVITY_QUERY_LT_10 - if self._is_9_6_or_above(db): + if is_9_6_or_above(self._get_version()): metrics_query = ACTIVITY_METRICS_9_6 - elif self._is_9_2_or_above(db): + elif is_9_2_or_above(self._get_version()): metrics_query = ACTIVITY_METRICS_9_2 - elif self._is_8_3_or_above(db): + elif is_8_3_or_above(self._get_version()): metrics_query = ACTIVITY_METRICS_8_3 else: metrics_query = ACTIVITY_METRICS_LT_8_3 @@ -386,7 +336,7 @@ def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, rela if scope is None: return None - if scope == REPLICATION_METRICS or not self._is_above(db, [9, 0, 0]): + if scope == REPLICATION_METRICS or not is_above(self._get_version(), "9.0.0"): log_func = self.log.debug else: log_func = self.log.warning diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py new file mode 100644 index 
0000000000000..c781d800642e9 --- /dev/null +++ b/postgres/datadog_checks/postgres/version_utils.py @@ -0,0 +1,58 @@ +# (C) Datadog, Inc. 2019 +# All rights reserved +# Licensed under Simplified BSD License (see LICENSE) +import re +import semver + + +def get_version(db): + cursor = db.cursor() + cursor.execute('SHOW SERVER_VERSION;') + raw_version = cursor.fetchone()[0] + try: + version_parts = raw_version.split(' ')[0].split('.') + version = [int(part) for part in version_parts] + while len(version) < 3: + version.append(0) + return semver.VersionInfo(*version) + except Exception: + # Postgres might be in development, with format \d+[beta|rc]\d+ + match = re.match(r'(\d+)([a-zA-Z]+)(\d+)', raw_version) + if match: + version = list(match.groups()) + # We found a valid development version + if len(version) == 3: + # Replace development tag with a negative number to properly compare versions + version[1] = -1 + version = [int(part) for part in version] + return semver.VersionInfo(*version) + + +def is_above(version, version_to_compare): + if version is None: + return False + return version >= semver.parse(version_to_compare) + + +def is_8_3_or_above(version): + return is_above(version, "8.3.0") + + +def is_9_1_or_above(version): + return is_above(version, "9.1.0") + + +def is_9_2_or_above(version): + return is_above(version, "9.2.0") + + +def is_9_4_or_above(version): + return is_above(version, "9.4.0") + + +def is_9_6_or_above(version): + return is_above(version, "9.6.0") + + +def is_10_or_above(version): + return is_above(version, "10.0.0") diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 9c9e30b7008c6..280ea7d9ef0f6 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -7,7 +7,6 @@ from datadog_checks.postgres import util -# Mark the entire module as tests of type `unit` pytestmark = pytest.mark.unit @@ -64,86 +63,6 @@ def test_get_instance_with_default(check): assert " AND psd.datname not ilike 'postgres'" not 
in res['query'] -def test_get_version(check): - """ - Test _get_version() to make sure the check is properly parsing Postgres versions - """ - db = MagicMock() - - # Test #.#.# style versions - db.cursor().fetchone.return_value = ['9.5.3'] - assert check._get_version(db) == [9, 5, 3] - - # Test #.# style versions - db.cursor().fetchone.return_value = ['10.2'] - check._clean_state() - assert check._get_version(db) == [10, 2] - - # Test #beta# style versions - db.cursor().fetchone.return_value = ['11beta3'] - check._clean_state() - assert check._get_version(db) == [11, -1, 3] - - # Test #rc# style versions - db.cursor().fetchone.return_value = ['11rc1'] - check._clean_state() - assert check._get_version(db) == [11, -1, 1] - - # Test #unknown# style versions - db.cursor().fetchone.return_value = ['11nightly3'] - check._clean_state() - assert check._get_version(db) == [11, -1, 3] - - -def test_is_above(check): - """ - Test _is_above() to make sure the check is properly determining order of versions - """ - db = MagicMock() - - # Test major versions - db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [9, 5, 4]) - assert check._is_above(db, [11, 0, 0]) is False - - # Test minor versions - db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 4, 4]) - assert check._is_above(db, [10, 6, 4]) is False - - # Test patch versions - db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 5, 3]) - assert check._is_above(db, [10, 5, 5]) is False - - # Test same version, _is_above() returns True for greater than or equal to - db.cursor().fetchone.return_value = ['10.5.4'] - assert check._is_above(db, [10, 5, 4]) - - # Test beta version above - db.cursor().fetchone.return_value = ['11beta4'] - check._clean_state() - check._clean_state() - assert check._is_above(db, [11, -1, 3]) - - # Test beta version against official version - db.cursor().fetchone.return_value = ['11.0.0'] - check._clean_state() - assert 
check._is_above(db, [11, -1, 3]) - - # Test versions of unequal length - db.cursor().fetchone.return_value = ['10.0'] - check._clean_state() - assert check._is_above(db, [10, 0]) - assert check._is_above(db, [10, 0, 0]) - assert check._is_above(db, [10, 0, 1]) is False - - # Test return value is not a list - db.cursor().fetchone.return_value = "foo" - check._clean_state() - assert check._is_above(db, [10, 0]) is False - - def test_malformed_get_custom_queries(check): """ Test early-exit conditions for _get_custom_queries() diff --git a/postgres/tests/test_version_utils.py b/postgres/tests/test_version_utils.py new file mode 100644 index 0000000000000..b17457d92e05e --- /dev/null +++ b/postgres/tests/test_version_utils.py @@ -0,0 +1,79 @@ +# (C) Datadog, Inc. 2018 +# All rights reserved +# Licensed under Simplified BSD License (see LICENSE) +import pytest + +from semver import VersionInfo +from mock import MagicMock +from datadog_checks.postgres.version_utils import get_version, is_above + +pytestmark = pytest.mark.unit + + +def test_get_version(): + """ + Test _get_version() to make sure the check is properly parsing Postgres versions + """ + db = MagicMock() + + # Test #.#.# style versions + db.cursor().fetchone.return_value = ['9.5.3'] + assert get_version(db) == VersionInfo(9, 5, 3) + + # Test #.# style versions + db.cursor().fetchone.return_value = ['10.2'] + assert get_version(db) == VersionInfo(10, 2, 0) + + # Test #beta# style versions + db.cursor().fetchone.return_value = ['11beta3'] + assert get_version(db) == VersionInfo(11, -1, 3) + + # Test #rc# style versions + db.cursor().fetchone.return_value = ['11rc1'] + assert get_version(db) == VersionInfo(11, -1, 1) + + # Test #unknown# style versions + db.cursor().fetchone.return_value = ['11nightly3'] + assert get_version(db) == VersionInfo(11, -1, 3) + + +def test_is_above(): + """ + Test _is_above() to make sure the check is properly determining order of versions + """ + # Test major versions + version = 
VersionInfo(10, 5, 4) + assert is_above(version, "9.5.4") + assert is_above(version, "11.0.0") is False + + # Test minor versions + assert is_above(version, "10.4.4") + assert is_above(version, "10.6.4") is False + + # Test patch versions + assert is_above(version, "10.5.3") + assert is_above(version, "10.5.5") is False + + # Test same version, _is_above() returns True for greater than or equal to + assert is_above(version, "10.5.4") + + # Test beta version above + db = MagicMock() + db.cursor().fetchone.return_value = ['11beta4'] + version = get_version(db) + assert version > VersionInfo(11, -1, 3) + + # Test beta version against official version + version = VersionInfo(11, 0, 0) + assert version > VersionInfo(11, -1, 3) + + # Test versions of unequal length + db.cursor().fetchone.return_value = ['10.0'] + version = get_version(db) + assert is_above(version, "10.0.0") + assert is_above(version, "10.0.1") is False + + # Test return value is not a list + db.cursor().fetchone.return_value = "foo" + version = get_version(db) + assert is_above(version, "10.0.0") is False From 9c7ce54467c6c24e15f5261841ffc1ca807b0656 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 13:53:37 +0200 Subject: [PATCH 08/27] Fix unit tests --- postgres/datadog_checks/postgres/postgres.py | 35 ++++++++++---------- postgres/tests/conftest.py | 5 ++- postgres/tests/test_integration.py | 4 +-- postgres/tests/test_unit.py | 7 ++-- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index def023b236061..70df579b62b87 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -105,7 +105,7 @@ def _build_tags(self, custom_tags, host, port, dbname): return tags def _clean_state(self): - self.version = None + self._version = None self.instance_metrics = None self.bgw_metrics = None 
self.archiver_metrics = None @@ -121,11 +121,12 @@ def _get_replication_role(self, db): # value fetched for role is of return "standby" if role else "master" - def _get_version(self, db): - if self.version is None: - self.version = get_version(db) - self.service_metadata('version', [self.version['major'], self.version['minor'], self.version['patch']]) - return self.version + @property + def version(self): + if self._version is None: + self._version = get_version(self.db) + self.service_metadata('version', [self._version['major'], self._version['minor'], self._version['patch']]) + return self._version def _get_instance_metrics(self, database_size_metrics, collect_default_db): """ @@ -143,7 +144,7 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): if metrics is None: # select the right set of metrics to collect depending on postgres version - if is_9_2_or_above(self.get_version()): + if is_9_2_or_above(self.version): self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) else: self.instance_metrics = dict(COMMON_METRICS) @@ -195,9 +196,9 @@ def _get_bgw_metrics(self, db): self.db_bgw_metrics.append(sub_key) self.bgw_metrics = dict(COMMON_BGW_METRICS) - if is_9_1_or_above(self._get_version()): + if is_9_1_or_above(self.version): self.bgw_metrics.update(NEWER_91_BGW_METRICS) - if _is_9_2_or_above(self._get_version()): + if is_9_2_or_above(self.version): self.bgw_metrics.update(NEWER_92_BGW_METRICS) metrics = self.bgw_metrics @@ -228,7 +229,7 @@ def _get_archiver_metrics(self, db): # the table, mirroring _get_bgw_metrics() metrics = self.archiver_metrics - if metrics is None and is_9_4_or_above(self._get_version()): + if metrics is None and is_9_4_or_above(self.version): # Collect from only one instance. See _get_bgw_metrics() for details on why. 
sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: @@ -264,9 +265,9 @@ def _get_replication_metrics(self, db): if is_10_or_above(self.get_version()) and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_10) metrics = self.replication_metrics - elif is_9_1_or_above(self._get_version()) and metrics is None: + elif is_9_1_or_above(self.version) and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_9_1) - if is_9_2_or_above(self._get_version()): + if is_9_2_or_above(self.version): self.replication_metrics.update(REPLICATION_METRICS_9_2) metrics = self.replication_metrics return metrics @@ -280,11 +281,11 @@ def _get_activity_metrics(self, db, user): if metrics_data is None: query = ACTIVITY_QUERY_10 if self._is_10_or_above(db) else ACTIVITY_QUERY_LT_10 - if is_9_6_or_above(self._get_version()): + if is_9_6_or_above(self.version): metrics_query = ACTIVITY_METRICS_9_6 - elif is_9_2_or_above(self._get_version()): + elif is_9_2_or_above(self.version): metrics_query = ACTIVITY_METRICS_9_2 - elif is_8_3_or_above(self._get_version()): + elif is_8_3_or_above(self.version): metrics_query = ACTIVITY_METRICS_8_3 else: metrics_query = ACTIVITY_METRICS_LT_8_3 @@ -336,7 +337,7 @@ def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, rela if scope is None: return None - if scope == REPLICATION_METRICS or not is_above(self._get_version(), "9.0.0"): + if scope == REPLICATION_METRICS or not is_above(self.version, "9.0.0"): log_func = self.log.debug else: log_func = self.log.warning @@ -364,7 +365,7 @@ def _query_scope(self, cursor, scope, db, instance_tags, is_custom_metrics, rela log_func(e) log_func( "It seems the PG version has been incorrectly identified as %s. " - "A reattempt to identify the right version will happen on next agent run." % self.version + "A reattempt to identify the right version will happen on next agent run." 
% self._version ) self._clean_state() db.rollback() diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index e9f4081dc5ba5..9f8c9d6b6cd02 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -9,6 +9,7 @@ from datadog_checks.dev import WaitFor, docker_run from datadog_checks.postgres import PostgreSql +from semver import VersionInfo from .common import DB_NAME, HOST, PASSWORD, PORT, USER @@ -31,8 +32,7 @@ def dd_environment(e2e_instance): @pytest.fixture def check(): c = PostgreSql('postgres', {}, [{'dbname': 'dbname', 'host': 'localhost', 'port': '5432'}]) - c._is_9_2_or_above = mock.MagicMock() - PostgreSql._known_servers = set() # reset the global state + c._version = VersionInfo(9, 2, 0) return c @@ -40,7 +40,6 @@ def check(): def integration_check(): def _check(instance): c = PostgreSql('postgres', {}, [instance]) - c._is_9_2_or_above = mock.MagicMock() return c return _check diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 39529f0cad1fe..4112fd2f1e075 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -158,7 +158,7 @@ def throw_exception_first_time(*args, **kwargs): def assert_state_clean(check): - assert check.version is None + assert check._version is None assert check.instance_metrics is None assert check.bgw_metrics is None assert check.archiver_metrics is None @@ -169,7 +169,7 @@ def assert_state_clean(check): def assert_state_set(check,): - assert check.version + assert check._version assert check.instance_metrics assert check.bgw_metrics if POSTGRES_VERSION != '9.3': diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 280ea7d9ef0f6..67c0ea5485601 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -6,6 +6,7 @@ from mock import MagicMock from datadog_checks.postgres import util +from semver import VersionInfo pytestmark = pytest.mark.unit @@ -14,7 +15,7 @@ def 
test_get_instance_metrics_lt_92(check): """ check output when 9.2+ """ - check._is_9_2_or_above.return_value = False + check._version = VersionInfo(9, 1, 0) res = check._get_instance_metrics(False, False) assert res['metrics'] == util.COMMON_METRICS @@ -23,7 +24,7 @@ def test_get_instance_metrics_92(check): """ check output when <9.2 """ - check._is_9_2_or_above.return_value = True + check._version = VersionInfo(9, 2, 0) res = check._get_instance_metrics(False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) @@ -34,7 +35,7 @@ def test_get_instance_metrics_state(check): """ res = check._get_instance_metrics(False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) - check._is_9_2_or_above.side_effect = Exception # metrics were cached so this shouldn't be called + check._version = 'foo' # metrics were cached so this shouldn't be called res = check._get_instance_metrics([], False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) From fb9784aaffb6c374114bbb8fe4c78e319a3174fb Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 13:59:08 +0200 Subject: [PATCH 09/27] Add missing argument --- postgres/datadog_checks/postgres/postgres.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index d3c6b7d7c8aeb..24e5a121ae580 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -177,7 +177,7 @@ def _is_9_6_or_above(self, db): def _is_10_or_above(self, db): return self._is_above(db, [10, 0, 0]) - def _get_instance_metrics(self, database_size_metrics, collect_default_db): + def _get_instance_metrics(self, db, database_size_metrics, collect_default_db): """ Add NEWER_92_METRICS to the default set of COMMON_METRICS when server version is 9.2 or later. 
@@ -193,7 +193,7 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): if metrics is None: # select the right set of metrics to collect depending on postgres version - if self._is_9_2_or_above(): + if self._is_9_2_or_above(db): self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) else: self.instance_metrics = dict(COMMON_METRICS) @@ -516,7 +516,7 @@ def _collect_stats( If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml """ - db_instance_metrics = self._get_instance_metrics(collect_database_size_metrics, collect_default_db) + db_instance_metrics = self._get_instance_metrics(db, collect_database_size_metrics, collect_default_db) bgw_instance_metrics = self._get_bgw_metrics(db) archiver_instance_metrics = self._get_archiver_metrics(db) From 59288196b95dbf91f59ef1e1453be34fb87daa3c Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Mon, 21 Oct 2019 15:20:27 +0200 Subject: [PATCH 10/27] Fix unit tests --- postgres/tests/conftest.py | 1 - postgres/tests/test_unit.py | 15 +++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index e9f4081dc5ba5..4aa1d3d8d6ece 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -32,7 +32,6 @@ def dd_environment(e2e_instance): def check(): c = PostgreSql('postgres', {}, [{'dbname': 'dbname', 'host': 'localhost', 'port': '5432'}]) c._is_9_2_or_above = mock.MagicMock() - PostgreSql._known_servers = set() # reset the global state return c diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 9c9e30b7008c6..aa8c9ef8cce37 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -16,7 +16,7 @@ def test_get_instance_metrics_lt_92(check): check output when 9.2+ """ check._is_9_2_or_above.return_value = False - res = check._get_instance_metrics(False, False) + res = 
check._get_instance_metrics('dbname', False, False) assert res['metrics'] == util.COMMON_METRICS @@ -25,7 +25,7 @@ def test_get_instance_metrics_92(check): check output when <9.2 """ check._is_9_2_or_above.return_value = True - res = check._get_instance_metrics(False, False) + res = check._get_instance_metrics('dbname', False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) @@ -33,10 +33,10 @@ def test_get_instance_metrics_state(check): """ Ensure data is consistent when the function is called more than once """ - res = check._get_instance_metrics(False, False) + res = check._get_instance_metrics('dbname', False, False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) check._is_9_2_or_above.side_effect = Exception # metrics were cached so this shouldn't be called - res = check._get_instance_metrics([], False) + res = check._get_instance_metrics('dbname', [], False) assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS) @@ -47,7 +47,7 @@ def test_get_instance_metrics_database_size_metrics(check): expected = util.COMMON_METRICS expected.update(util.NEWER_92_METRICS) expected.update(util.DATABASE_SIZE_METRICS) - res = check._get_instance_metrics(True, False) + res = check._get_instance_metrics('dbname', True, False) assert res['metrics'] == expected @@ -56,11 +56,11 @@ def test_get_instance_with_default(check): Test the contents of the query string with different `collect_default_db` values """ collect_default_db = False - res = check._get_instance_metrics(False, collect_default_db) + res = check._get_instance_metrics('dbname', False, collect_default_db) assert " AND psd.datname not ilike 'postgres'" in res['query'] collect_default_db = True - res = check._get_instance_metrics(False, collect_default_db) + res = check._get_instance_metrics('dbname', False, collect_default_db) assert " AND psd.datname not ilike 'postgres'" not in res['query'] @@ -123,7 +123,6 @@ def test_is_above(check): 
# Test beta version above db.cursor().fetchone.return_value = ['11beta4'] check._clean_state() - check._clean_state() assert check._is_above(db, [11, -1, 3]) # Test beta version against official version From 81f9d5fd773af7b5388be339ff9e4bb2efb4f3be Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Tue, 22 Oct 2019 17:43:02 +0200 Subject: [PATCH 11/27] Remove unused argument --- postgres/datadog_checks/postgres/postgres.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 155facedd635f..74f1f8f9c747e 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -304,7 +304,7 @@ def _get_archiver_metrics(self): 'relation': False, } - def _get_replication_metrics(self, db): + def _get_replication_metrics(self): """ Use either REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2, depending on the postgres version. 
From 0fe217d51fa2bae2f860063a260135eabd85c70d Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 23 Oct 2019 10:27:50 +0200 Subject: [PATCH 12/27] Style fix --- postgres/datadog_checks/postgres/postgres.py | 1 - 1 file changed, 1 deletion(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 8f16d9f1a66b3..51ff029d776f0 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -768,7 +768,6 @@ def check(self, instance): tags = self.tags # Collect metrics - db = None try: # Check version self._connect(host, port, user, password, dbname, ssl, tags) From 768a3565b0b2a10010692eb410a9039135760d32 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 23 Oct 2019 12:04:49 +0200 Subject: [PATCH 13/27] Add semver dependency --- postgres/requirements.in | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres/requirements.in b/postgres/requirements.in index fe939cce41bb9..028314428653f 100644 --- a/postgres/requirements.in +++ b/postgres/requirements.in @@ -1 +1,2 @@ psycopg2-binary==2.8.4 +semver==2.8.1 From d12d774cf26dca34920dfec269bfe3ac3404982a Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 23 Oct 2019 12:16:10 +0200 Subject: [PATCH 14/27] Add semver dependency to agent_requirements, create version constants --- .../base/data/agent_requirements.in | 1 + postgres/datadog_checks/postgres/postgres.py | 4 +-- .../datadog_checks/postgres/version_utils.py | 25 +++++++++++++------ 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in index 6566d3030d4d0..9f38a617ec884 100644 --- a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in +++ 
b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in @@ -53,6 +53,7 @@ requests_ntlm==1.1.0 scandir==1.8 securesystemslib[crypto,pynacl]==0.11.3 selectors34==1.2.0; sys_platform == 'win32' and python_version < '3.4' +semver==2.8.1 serpent==1.27; sys_platform == 'win32' service_identity[idna]==18.1.0 simplejson==3.6.5 diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 4e9e6b60e602c..aa51d721406d9 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -8,7 +8,7 @@ import psycopg2 from .version_utils import get_version, is_above, is_9_2_or_above, is_9_1_or_above, is_10_or_above, is_9_6_or_above, \ - is_8_3_or_above + is_8_3_or_above, V9 from six import iteritems from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative @@ -337,7 +337,7 @@ def _query_scope(self, cursor, scope, instance_tags, is_custom_metrics, relation if scope is None: return None - if scope == REPLICATION_METRICS or not is_above(self.version, "9.0.0"): + if scope == REPLICATION_METRICS or not is_above(self.version, V9): log_func = self.log.debug else: log_func = self.log.warning diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py index c781d800642e9..c3cd213e97bf9 100644 --- a/postgres/datadog_checks/postgres/version_utils.py +++ b/postgres/datadog_checks/postgres/version_utils.py @@ -5,6 +5,15 @@ import semver +V8_3 = semver.parse("8.3.0") +V9 = semver.parse("9.0.0") +V9_1 = semver.parse("9.1.0") +V9_2 = semver.parse("9.2.0") +V9_4 = semver.parse("9.4.0") +V9_6 = semver.parse("9.6.0") +V10 = semver.parse("10.0.0") + + def get_version(db): cursor = db.cursor() cursor.execute('SHOW SERVER_VERSION;') @@ -29,30 +38,32 @@ def get_version(db): def is_above(version, version_to_compare): + if isinstance(version_to_compare, str): + version_to_compare = semver.parse(version_to_compare) if version 
is None: return False - return version >= semver.parse(version_to_compare) + return version >= version_to_compare def is_8_3_or_above(version): - return is_above(version, "8.3.0") + return is_above(version, V8_3) def is_9_1_or_above(version): - return is_above(version, "9.1.0") + return is_above(version, V9_1) def is_9_2_or_above(version): - return is_above(version, "9.2.0") + return is_above(version, V9_2) def is_9_4_or_above(version): - return is_above(version, "9.4.0") + return is_above(version, V9_4) def is_9_6_or_above(version): - return is_above(version, "9.6.0") + return is_above(version, V9_6) def is_10_or_above(version): - return is_above(version, "10.0.0") + return is_above(version, V10) From a0401c619e4552109ae7ff1273d64592c86682f4 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 23 Oct 2019 12:18:18 +0200 Subject: [PATCH 15/27] Style fix --- postgres/datadog_checks/postgres/postgres.py | 12 ++++++++++-- postgres/datadog_checks/postgres/version_utils.py | 2 +- postgres/tests/conftest.py | 2 +- postgres/tests/test_unit.py | 2 +- postgres/tests/test_version_utils.py | 4 ++-- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index aa51d721406d9..0b9c530220b72 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -7,8 +7,6 @@ from contextlib import closing import psycopg2 -from .version_utils import get_version, is_above, is_9_2_or_above, is_9_1_or_above, is_10_or_above, is_9_6_or_above, \ - is_8_3_or_above, V9 from six import iteritems from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative @@ -42,6 +40,16 @@ STATIO_METRICS, fmt, ) +from .version_utils import ( + V9, + get_version, + is_8_3_or_above, + is_9_1_or_above, + is_9_2_or_above, + is_9_6_or_above, + is_10_or_above, + is_above, +) MAX_CUSTOM_RESULTS = 100 
TABLE_COUNT_LIMIT = 200 diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py index c3cd213e97bf9..ea73d738f7e18 100644 --- a/postgres/datadog_checks/postgres/version_utils.py +++ b/postgres/datadog_checks/postgres/version_utils.py @@ -2,8 +2,8 @@ # All rights reserved # Licensed under Simplified BSD License (see LICENSE) import re -import semver +import semver V8_3 = semver.parse("8.3.0") V9 = semver.parse("9.0.0") diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index 9f8c9d6b6cd02..54c4d093105b2 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -6,10 +6,10 @@ import mock import psycopg2 import pytest +from semver import VersionInfo from datadog_checks.dev import WaitFor, docker_run from datadog_checks.postgres import PostgreSql -from semver import VersionInfo from .common import DB_NAME, HOST, PASSWORD, PORT, USER diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index 30d113f04d1a2..13a3ea6fe57ed 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -4,9 +4,9 @@ import psycopg2 import pytest from mock import MagicMock +from semver import VersionInfo from datadog_checks.postgres import util -from semver import VersionInfo pytestmark = pytest.mark.unit diff --git a/postgres/tests/test_version_utils.py b/postgres/tests/test_version_utils.py index b17457d92e05e..378f7664e5f46 100644 --- a/postgres/tests/test_version_utils.py +++ b/postgres/tests/test_version_utils.py @@ -2,9 +2,9 @@ # All rights reserved # Licensed under Simplified BSD License (see LICENSE) import pytest - -from semver import VersionInfo from mock import MagicMock +from semver import VersionInfo + from datadog_checks.postgres.version_utils import get_version, is_above pytestmark = pytest.mark.unit From 6b3af0d13de42d5d66b4f7c306d644be49730edc Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 23 Oct 2019 
12:56:10 +0200 Subject: [PATCH 16/27] Fix tests --- postgres/datadog_checks/postgres/postgres.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 0b9c530220b72..3452c9ed9309a 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -73,6 +73,7 @@ def __init__(self, name, init_config, instances): AgentCheck.__init__(self, name, init_config, instances) self._clean_state() self.db = None + self._version = None self.custom_metrics = None # Deprecate custom_metrics in favor of custom_queries @@ -270,7 +271,7 @@ def _get_replication_metrics(self): Uses a dictionnary to save the result for each instance """ metrics = self.replication_metrics - if is_10_or_above(self.get_version()) and metrics is None: + if is_10_or_above(self.version) and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_10) metrics = self.replication_metrics elif is_9_1_or_above(self.version) and metrics is None: @@ -732,7 +733,7 @@ def check(self, instance): self._connect(host, port, user, password, dbname, ssl, tags) if tag_replication_role: tags.extend(["replication_role:{}".format(self._get_replication_role())]) - version = self._get_version() + version = self.version self.log.debug("Running check against version %s" % version) self._collect_stats( user, From c35f28a2cec3a4b1298a09237c920ba1643fa6c1 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Thu, 24 Oct 2019 12:31:37 +0200 Subject: [PATCH 17/27] fix metadata --- postgres/datadog_checks/postgres/postgres.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index bdf1dd32ba43e..b88e7542ac72b 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -134,7 
+134,7 @@ def _get_replication_role(self): def version(self): if self._version is None: self._version = get_version(self.db) - self.service_metadata('version', [self._version['major'], self._version['minor'], self._version['patch']]) + self.service_metadata('version', [self._version.major, self._version.minor, self._version.patch]) return self._version def _get_instance_metrics(self, database_size_metrics, collect_default_db): From 0ddbb8a9c72a148d277c92e609f638a2b0d56421 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Thu, 24 Oct 2019 12:54:16 +0200 Subject: [PATCH 18/27] Add missing import --- postgres/datadog_checks/postgres/postgres.py | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index b88e7542ac72b..fda645b51172e 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -46,6 +46,7 @@ is_8_3_or_above, is_9_1_or_above, is_9_2_or_above, + is_9_4_or_above, is_9_6_or_above, is_10_or_above, is_above, From e47279886ab8d08f792927a8fdb9ea47e947d3f3 Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Thu, 24 Oct 2019 13:25:08 +0200 Subject: [PATCH 19/27] Fix tests --- postgres/datadog_checks/postgres/postgres.py | 2 +- postgres/tests/test_integration.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index fda645b51172e..101774d31c0a8 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -289,7 +289,7 @@ def _get_activity_metrics(self, user): metrics_data = self.activity_metrics if metrics_data is None: - query = ACTIVITY_QUERY_10 if self._is_10_or_above(db) else ACTIVITY_QUERY_LT_10 + query = ACTIVITY_QUERY_10 if is_10_or_above(self.version) else ACTIVITY_QUERY_LT_10 
if is_9_6_or_above(self.version): metrics_query = ACTIVITY_METRICS_9_6 elif is_9_2_or_above(self.version): diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 4112fd2f1e075..806c28c5180a3 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -8,6 +8,7 @@ import pytest from datadog_checks.postgres import PostgreSql +from semver import VersionInfo from .common import DB_NAME, HOST, PORT, POSTGRES_VERSION, check_bgw_metrics, check_common_metrics from .utils import requires_over_10 @@ -126,13 +127,13 @@ def test_activity_metrics(aggregator, integration_check, pg_instance): def test_wrong_version(aggregator, integration_check, pg_instance): check = integration_check(pg_instance) # Enforce to cache wrong version - check.version = [9, 6, 0] + check._version = VersionInfo(*[9, 6, 0]) check.check(pg_instance) assert_state_clean(check) check.check(pg_instance) - assert check.version[0] == int(POSTGRES_VERSION) + assert check._version.major == int(POSTGRES_VERSION) assert_state_set(check) From 6ab4df99c22e9263bf135fb38658082195711ede Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Thu, 24 Oct 2019 13:28:17 +0200 Subject: [PATCH 20/27] Remove unneded var --- postgres/datadog_checks/postgres/postgres.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 101774d31c0a8..52e22164bc71f 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -733,8 +733,7 @@ def check(self, instance): self._connect(host, port, user, password, dbname, ssl, tags) if tag_replication_role: tags.extend(["replication_role:{}".format(self._get_replication_role())]) - version = self.version - self.log.debug("Running check against version %s" % version) + self.log.debug("Running check against version %s" % self.version) 
self._collect_stats( user, tags, From fb0cba898200b1c2b97119d4d79d0104e5c84a4f Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Thu, 24 Oct 2019 14:07:00 +0200 Subject: [PATCH 21/27] Style fix --- postgres/datadog_checks/postgres/postgres.py | 2 +- postgres/tests/conftest.py | 1 - postgres/tests/test_integration.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 52e22164bc71f..50c993b0b5eb3 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -733,7 +733,7 @@ def check(self, instance): self._connect(host, port, user, password, dbname, ssl, tags) if tag_replication_role: tags.extend(["replication_role:{}".format(self._get_replication_role())]) - self.log.debug("Running check against version %s" % self.version) + self.log.debug("Running check against version %s", self.version) self._collect_stats( user, tags, diff --git a/postgres/tests/conftest.py b/postgres/tests/conftest.py index 54c4d093105b2..735e14bd8ca03 100644 --- a/postgres/tests/conftest.py +++ b/postgres/tests/conftest.py @@ -3,7 +3,6 @@ # Licensed under Simplified BSD License (see LICENSE) import os -import mock import psycopg2 import pytest from semver import VersionInfo diff --git a/postgres/tests/test_integration.py b/postgres/tests/test_integration.py index 806c28c5180a3..875dc723cfd17 100644 --- a/postgres/tests/test_integration.py +++ b/postgres/tests/test_integration.py @@ -6,9 +6,9 @@ import mock import psycopg2 import pytest +from semver import VersionInfo from datadog_checks.postgres import PostgreSql -from semver import VersionInfo from .common import DB_NAME, HOST, PORT, POSTGRES_VERSION, check_bgw_metrics, check_common_metrics from .utils import requires_over_10 From 19f0d998faba0790dd1fae7a3e0e96846f2b4fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Herv=C3=A9?= Date: 
Mon, 28 Oct 2019 14:15:07 +0100 Subject: [PATCH 22/27] Update requirements when updating check We need to pass the requirements explicitly when updating a check to its development version. --- datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py | 5 ++++- datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py b/datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py index 050271f2b0827..7554b864d6ae1 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py @@ -5,7 +5,7 @@ from contextlib import contextmanager from ...subprocess import run_command -from ...utils import path_join +from ...utils import file_exists, path_join from ..constants import get_root from .agent import ( DEFAULT_AGENT_VERSION, @@ -178,12 +178,15 @@ def update_check(self): command = ['docker', 'exec', self.container_name] command.extend(get_pip_exe(self.python_version)) command.extend(('install', '-e', self.check_mount_dir)) + if file_exists(path_join(get_root(), self.check, 'requirements.in')): + command.extend(('-r', path_join(self.check_mount_dir, 'requirements.in'))) run_command(command, capture=True, check=True) def update_base_package(self): command = ['docker', 'exec', self.container_name] command.extend(get_pip_exe(self.python_version)) command.extend(('install', '-e', self.base_mount_dir)) + command.extend(('-r', path_join(self.base_mount_dir, 'requirements.in'))) run_command(command, capture=True, check=True) def update_agent(self): diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py b/datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py index 109edba7d35aa..c79a81db2ed86 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py @@ -170,12 +170,16 @@ def 
run_check(

     def update_check(self):
         command = get_pip_exe(self.python_version, self.platform)
-        command.extend(('install', '-e', path_join(get_root(), self.check)))
+        path = path_join(get_root(), self.check)
+        command.extend(('install', '-e', path))
+        if file_exists(path_join(path, 'requirements.in')):
+            command.extend(('-r', path_join(path, 'requirements.in')))
         return run_command(command, capture=True, check=True)

     def update_base_package(self):
         command = get_pip_exe(self.python_version, self.platform)
         command.extend(('install', '-e', self.base_package))
+        command.extend(('-r', path_join(self.base_package, 'requirements.in')))
         return run_command(command, capture=True, check=True)

     def update_agent(self):

From 689f4850cd3943a0827b14f3a3dd80f87b874992 Mon Sep 17 00:00:00 2001
From: Julia Simon <611228+hithwen@users.noreply.github.com>
Date: Tue, 29 Oct 2019 15:42:38 +0100
Subject: [PATCH 23/27] Log error when cannot determine version

---
 postgres/datadog_checks/postgres/postgres.py      | 6 +++++-
 postgres/datadog_checks/postgres/version_utils.py | 1 +
 postgres/tests/test_version_utils.py              | 6 +++++-
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py
index 50c993b0b5eb3..f8f09246ff611 100644
--- a/postgres/datadog_checks/postgres/postgres.py
+++ b/postgres/datadog_checks/postgres/postgres.py
@@ -135,7 +135,11 @@ def _get_replication_role(self):
     def version(self):
         if self._version is None:
             self._version = get_version(self.db)
-            self.service_metadata('version', [self._version.major, self._version.minor, self._version.patch])
+            if isinstance(self._version, str):
+                self.log.error("Could not determine postgres version. Obtained %s", self._version)
+                self._version = None
+            else:
+                self.service_metadata('version', [self._version.major, self._version.minor, self._version.patch])
         return self._version

     def _get_instance_metrics(self, database_size_metrics, collect_default_db):
diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py
index ea73d738f7e18..6d8b055b8b08a 100644
--- a/postgres/datadog_checks/postgres/version_utils.py
+++ b/postgres/datadog_checks/postgres/version_utils.py
@@ -35,6 +35,7 @@ def get_version(db):
             version[1] = -1
             version = [int(part) for part in version]
             return semver.VersionInfo(*version)
+    return raw_version


 def is_above(version, version_to_compare):
diff --git a/postgres/tests/test_version_utils.py b/postgres/tests/test_version_utils.py
index 378f7664e5f46..6d7c64321dfa3 100644
--- a/postgres/tests/test_version_utils.py
+++ b/postgres/tests/test_version_utils.py
@@ -32,10 +32,14 @@ def test_get_version():
     db.cursor().fetchone.return_value = ['11rc1']
     assert get_version(db) == VersionInfo(11, -1, 1)

-    # Test #unknown# style versions
+    # Test #nightly# style versions
     db.cursor().fetchone.return_value = ['11nightly3']
     assert get_version(db) == VersionInfo(11, -1, 3)

+    # Test #unknown# style versions
+    db.cursor().fetchone.return_value = ['dontKnow']
+    assert get_version(db) == 'dontKnow'
+

 def test_is_above():
     """
From 6c5357399664e8f912e5c3d30312f5cdce977284 Mon Sep 17 00:00:00 2001
From: Julia Simon <611228+hithwen@users.noreply.github.com>
Date: Tue, 29 Oct 2019 17:06:06 +0100
Subject: [PATCH 24/27] Remove custom comparison methods

---
 postgres/datadog_checks/postgres/postgres.py | 37 ++++++----------
 .../datadog_checks/postgres/version_utils.py | 32 --------------
 postgres/tests/test_version_utils.py         | 44 +------------------
 3 files changed, 14 insertions(+), 99 deletions(-)

diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py
index
f8f09246ff611..a4f2328dc6cfd 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -40,17 +40,7 @@ STATIO_METRICS, fmt, ) -from .version_utils import ( - V9, - get_version, - is_8_3_or_above, - is_9_1_or_above, - is_9_2_or_above, - is_9_4_or_above, - is_9_6_or_above, - is_10_or_above, - is_above, -) +from .version_utils import V8_3, V9, V9_1, V9_2, V9_4, V9_6, V10, get_version MAX_CUSTOM_RESULTS = 100 TABLE_COUNT_LIMIT = 200 @@ -158,7 +148,7 @@ def _get_instance_metrics(self, database_size_metrics, collect_default_db): if metrics is None: # select the right set of metrics to collect depending on postgres version - if is_9_2_or_above(self.version): + if self.version >= V9_2: self.instance_metrics = dict(COMMON_METRICS, **NEWER_92_METRICS) else: self.instance_metrics = dict(COMMON_METRICS) @@ -209,9 +199,9 @@ def _get_bgw_metrics(self): self.db_bgw_metrics.append(sub_key) self.bgw_metrics = dict(COMMON_BGW_METRICS) - if is_9_1_or_above(self.version): + if self.version >= V9_1: self.bgw_metrics.update(NEWER_91_BGW_METRICS) - if is_9_2_or_above(self.version): + if self.version >= V9_2: self.bgw_metrics.update(NEWER_92_BGW_METRICS) metrics = self.bgw_metrics @@ -242,7 +232,7 @@ def _get_archiver_metrics(self): # the table, mirroring _get_bgw_metrics() metrics = self.archiver_metrics - if metrics is None and is_9_4_or_above(self.version): + if metrics is None and self.version >= V9_4: # Collect from only one instance. See _get_bgw_metrics() for details on why. 
sub_key = self.key[:2] if sub_key in self.db_archiver_metrics: @@ -275,12 +265,12 @@ def _get_replication_metrics(self): Uses a dictionnary to save the result for each instance """ metrics = self.replication_metrics - if is_10_or_above(self.version) and metrics is None: + if self.version >= V10 and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_10) metrics = self.replication_metrics - elif is_9_1_or_above(self.version) and metrics is None: + elif self.version >= V9_1 and metrics is None: self.replication_metrics = dict(REPLICATION_METRICS_9_1) - if is_9_2_or_above(self.version): + if self.version >= V9_2: self.replication_metrics.update(REPLICATION_METRICS_9_2) metrics = self.replication_metrics return metrics @@ -293,12 +283,12 @@ def _get_activity_metrics(self, user): metrics_data = self.activity_metrics if metrics_data is None: - query = ACTIVITY_QUERY_10 if is_10_or_above(self.version) else ACTIVITY_QUERY_LT_10 - if is_9_6_or_above(self.version): + query = ACTIVITY_QUERY_10 if self.version >= V10 else ACTIVITY_QUERY_LT_10 + if self.version >= V9_6: metrics_query = ACTIVITY_METRICS_9_6 - elif is_9_2_or_above(self.version): + elif self.version >= V9_2: metrics_query = ACTIVITY_METRICS_9_2 - elif is_8_3_or_above(self.version): + elif self.version >= V8_3: metrics_query = ACTIVITY_METRICS_8_3 else: metrics_query = ACTIVITY_METRICS_LT_8_3 @@ -349,8 +339,7 @@ def _build_relations_config(self, yamlconfig): def _query_scope(self, cursor, scope, instance_tags, is_custom_metrics, relations_config): if scope is None: return None - - if scope == REPLICATION_METRICS or not is_above(self.version, V9): + if scope == REPLICATION_METRICS or not self.version >= V9: log_func = self.log.debug else: log_func = self.log.warning diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py index 6d8b055b8b08a..09a127b16e198 100644 --- a/postgres/datadog_checks/postgres/version_utils.py +++ 
b/postgres/datadog_checks/postgres/version_utils.py @@ -36,35 +36,3 @@ def get_version(db): version = [int(part) for part in version] return semver.VersionInfo(*version) return raw_version - - -def is_above(version, version_to_compare): - if isinstance(version_to_compare, str): - version_to_compare = semver.parse(version_to_compare) - if version is None: - return False - return version >= version_to_compare - - -def is_8_3_or_above(version): - return is_above(version, V8_3) - - -def is_9_1_or_above(version): - return is_above(version, V9_1) - - -def is_9_2_or_above(version): - return is_above(version, V9_2) - - -def is_9_4_or_above(version): - return is_above(version, V9_4) - - -def is_9_6_or_above(version): - return is_above(version, V9_6) - - -def is_10_or_above(version): - return is_above(version, V10) diff --git a/postgres/tests/test_version_utils.py b/postgres/tests/test_version_utils.py index 6d7c64321dfa3..d8989021a952a 100644 --- a/postgres/tests/test_version_utils.py +++ b/postgres/tests/test_version_utils.py @@ -5,7 +5,7 @@ from mock import MagicMock from semver import VersionInfo -from datadog_checks.postgres.version_utils import get_version, is_above +from datadog_checks.postgres.version_utils import get_version pytestmark = pytest.mark.unit @@ -39,45 +39,3 @@ def test_get_version(): # Test #unknown# style versions db.cursor().fetchone.return_value = ['dontKnow'] assert get_version(db) == 'dontKnow' - - -def test_is_above(): - """ - Test _is_above() to make sure the check is properly determining order of versions - """ - # Test major versions - version = VersionInfo(10, 5, 4) - assert is_above(version, "9.5.4") - assert is_above(version, "11.0.0") is False - - # Test minor versions - assert is_above(version, "10.4.4") - assert is_above(version, "10.6.4") is False - - # Test patch versions - assert is_above(version, "10.5.3") - assert is_above(version, "10.5.5") is False - - # Test same version, _is_above() returns True for greater than or equal to - 
assert is_above(version, "10.5.4") - - # Test beta version above - db = MagicMock() - db.cursor().fetchone.return_value = ['11beta4'] - version = get_version(db) - assert version > VersionInfo(11, -1, 3) - - # Test beta version against official version - version = VersionInfo(11, 0, 0) - assert version > VersionInfo(11, -1, 3) - - # Test versions of unequal length - db.cursor().fetchone.return_value = ['10.0'] - version = get_version(db) - assert is_above(version, "10.0.0") - assert is_above(version, "10.0.1") is False - - # Test return value is not a list - db.cursor().fetchone.return_value = "foo" - version = get_version(db) - assert is_above(version, "10.0.0") is False From 1ddb84f98cb6b41503c0238d359581d13597d6aa Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 30 Oct 2019 10:27:41 +0100 Subject: [PATCH 25/27] Fix typo --- postgres/datadog_checks/postgres/postgres.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index a4f2328dc6cfd..958976095e764 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -125,7 +125,7 @@ def _get_replication_role(self): def version(self): if self._version is None: self._version = get_version(self.db) - if isinstance(self._verison, str): + if isinstance(self._version, str): self.log.error("Could not determine postgres version. 
Obtained {}", self._version) self._version = None else: From d2efb46678cdcb6aec566524ca1c4d57750f289d Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 30 Oct 2019 17:01:36 +0100 Subject: [PATCH 26/27] Bump semver and throw exception if cannot determine version --- .../datadog_checks/base/data/agent_requirements.in | 2 +- postgres/datadog_checks/postgres/postgres.py | 6 +----- postgres/datadog_checks/postgres/version_utils.py | 2 +- postgres/requirements.in | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in index 9f38a617ec884..640f05fee683c 100644 --- a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in +++ b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in @@ -53,7 +53,7 @@ requests_ntlm==1.1.0 scandir==1.8 securesystemslib[crypto,pynacl]==0.11.3 selectors34==1.2.0; sys_platform == 'win32' and python_version < '3.4' -semver==2.8.1 +semver==2.9.0 serpent==1.27; sys_platform == 'win32' service_identity[idna]==18.1.0 simplejson==3.6.5 diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 958976095e764..4c5ccc708e388 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -125,11 +125,7 @@ def _get_replication_role(self): def version(self): if self._version is None: self._version = get_version(self.db) - if isinstance(self._version, str): - self.log.error("Could not determine postgres version. 
Obtained {}", self._version) - self._version = None - else: - self.service_metadata('version', [self._version.major, self._version.minor, self._version.patch]) + self.service_metadata('version', [self._version.major, self._version.minor, self._version.patch]) return self._version def _get_instance_metrics(self, database_size_metrics, collect_default_db): diff --git a/postgres/datadog_checks/postgres/version_utils.py b/postgres/datadog_checks/postgres/version_utils.py index 09a127b16e198..52b7af678be7a 100644 --- a/postgres/datadog_checks/postgres/version_utils.py +++ b/postgres/datadog_checks/postgres/version_utils.py @@ -35,4 +35,4 @@ def get_version(db): version[1] = -1 version = [int(part) for part in version] return semver.VersionInfo(*version) - return raw_version + raise Exception("Cannot determine which version is {}".format(raw_version)) diff --git a/postgres/requirements.in b/postgres/requirements.in index 028314428653f..2bf3b334b33d0 100644 --- a/postgres/requirements.in +++ b/postgres/requirements.in @@ -1,2 +1,2 @@ psycopg2-binary==2.8.4 -semver==2.8.1 +semver==2.9.0 From 7735c419fbf49368cccd828e75c83af10b69759a Mon Sep 17 00:00:00 2001 From: Julia Simon <611228+hithwen@users.noreply.github.com> Date: Wed, 30 Oct 2019 17:15:59 +0100 Subject: [PATCH 27/27] Add exception test --- postgres/tests/test_version_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/postgres/tests/test_version_utils.py b/postgres/tests/test_version_utils.py index d8989021a952a..0e8033566b553 100644 --- a/postgres/tests/test_version_utils.py +++ b/postgres/tests/test_version_utils.py @@ -36,6 +36,10 @@ def test_get_version(): db.cursor().fetchone.return_value = ['11nightly3'] assert get_version(db) == VersionInfo(11, -1, 3) - # Test #unknown# style versions + +def test_throws_exception_for_unknown_version_format(): + db = MagicMock() db.cursor().fetchone.return_value = ['dontKnow'] - assert get_version(db) == 'dontKnow' + with 
pytest.raises(Exception) as e: + get_version(db) + assert e.value.args[0] == "Cannot determine which version is dontKnow"