diff --git a/.circleci/config.yml b/.circleci/config.yml index 0cf1a65..7430186 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -14,12 +14,19 @@ jobs: virtualenv -p python3 /usr/local/share/virtualenvs/tap-zendesk source /usr/local/share/virtualenvs/tap-zendesk/bin/activate pip install .[test] + pip install coverage - run: name: 'pylint' command: | source /usr/local/share/virtualenvs/tap-zendesk/bin/activate - make test + pylint tap_zendesk -d missing-docstring,invalid-name,line-too-long,too-many-locals,too-few-public-methods,fixme,stop-iteration-return,too-many-branches,useless-import-alias,no-else-return,logging-not-lazy + nosetests --with-coverage --cover-erase --cover-package=tap_zendesk --cover-html-dir=htmlcov test/unittests + coverage html - add_ssh_keys + - store_test_results: + path: test_output/report.xml + - store_artifacts: + path: htmlcov - run: name: 'Integration Tests' command: | diff --git a/test/base.py b/test/base.py index ed31d45..0953a3a 100644 --- a/test/base.py +++ b/test/base.py @@ -1,57 +1,352 @@ import unittest import os import tap_tester.connections as connections -import tap_tester.menagerie as menagerie - +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner +from datetime import datetime as dt +from datetime import timedelta +import dateutil.parser +import pytz class ZendeskTest(unittest.TestCase): + start_date = "" + DATETIME_FMT = { + "%Y-%m-%dT%H:%M:%SZ", + "%Y-%m-%d %H:%M:%S", + "%Y-%m-%dT%H:%M:%S.%fZ" + } + START_DATE_FORMAT = "%Y-%m-%dT00:00:00Z" + PRIMARY_KEYS = "table-key-properties" + REPLICATION_METHOD = "forced-replication-method" + REPLICATION_KEYS = "valid-replication-keys" + FULL_TABLE = "FULL_TABLE" + INCREMENTAL = "INCREMENTAL" + OBEYS_START_DATE = "obey-start-date" + def tap_name(self): return "tap-zendesk" + def setUp(self): + required_env = { + "TAP_ZENDESK_CLIENT_ID", + "TAP_ZENDESK_CLIENT_SECRET", + "TAP_ZENDESK_ACCESS_TOKEN", + } + missing_envs = [v for v in required_env if not os.getenv(v)] + if missing_envs: + raise Exception("set " + ", ".join(missing_envs)) + def get_type(self): return "platform.zendesk" + def get_credentials(self): + return { + 'access_token': os.getenv('TAP_ZENDESK_ACCESS_TOKEN'), + 'client_id': os.getenv('TAP_ZENDESK_CLIENT_ID'), + 'client_secret': os.getenv('TAP_ZENDESK_CLIENT_SECRET') + } - def required_environment_variables(self): - return set(['TAP_ZENDESK_CLIENT_ID', - 'TAP_ZENDESK_CLIENT_SECRET', - 'TAP_ZENDESK_ACCESS_TOKEN', - ]) - - def setUp(self): - missing_envs = [x for x in self.required_environment_variables() if os.getenv(x) is None] - if missing_envs: - raise Exception("Missing environment variables, please set {}." 
.format(missing_envs)) - - def get_properties(self): - return { - 'start_date' : '2017-01-01T00:00:00Z', - 'subdomain': 'rjmdev', - 'marketplace_app_id': int(os.getenv('TAP_ZENDESK_MARKETPLACE_APP_ID')) or 0, - 'marketplace_name': os.getenv('TAP_ZENDESK_MARKETPLACE_NAME') or "", - 'marketplace_organization_id': int(os.getenv('TAP_ZENDESK_MARKETPLACE_ORGANIZATION_ID')) or 0, - 'search_window_size': '2592000'# seconds in a month + def get_properties(self, original: bool = True): + return_value = { + "start_date" : "2017-01-01T00:00:00Z", + "subdomain": "rjmdev", + "marketplace_app_id": int(os.getenv("TAP_ZENDESK_MARKETPLACE_APP_ID")) or 0, + "marketplace_name": os.getenv("TAP_ZENDESK_MARKETPLACE_NAME") or "", + "marketplace_organization_id": int(os.getenv("TAP_ZENDESK_MARKETPLACE_ORGANIZATION_ID")) or 0, + "search_window_size": "2592000"# seconds in a month } + if original: + return return_value - def get_credentials(self): - return {'access_token': os.getenv('TAP_ZENDESK_ACCESS_TOKEN'), - 'client_id': os.getenv('TAP_ZENDESK_CLIENT_ID'), - 'client_secret': os.getenv('TAP_ZENDESK_CLIENT_SECRET')} + # Reassign start date + return_value["start_date"] = self.start_date + return return_value - def expected_check_streams(self): + def expected_metadata(self): return { - 'groups', - 'group_memberships', - 'macros', - 'organizations', - 'satisfaction_ratings', - 'sla_policies', - 'tags', - 'ticket_comments', - 'ticket_fields', - 'ticket_forms', - 'ticket_metrics', - 'tickets', - 'users', - 'ticket_audits' + "groups": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "group_memberships": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "macros": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "organizations": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "satisfaction_ratings": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "sla_policies": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.FULL_TABLE, + self.OBEYS_START_DATE: False + }, + "tags": { + self.PRIMARY_KEYS: {"name"}, + self.REPLICATION_METHOD: self.FULL_TABLE, + self.OBEYS_START_DATE: False + }, + "ticket_comments": { + # ticket_comments is child stream of tickets, and tickets is incremental stream. + # But it does not save its own bookmark. It fetches records based on the record of the parent stream. + # That's why make it FULL_TABLE + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.FULL_TABLE, + self.OBEYS_START_DATE: False + }, + "ticket_fields": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "ticket_forms": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updated_at"}, + self.OBEYS_START_DATE: True + }, + "ticket_metrics": { + # ticket_metrics is child stream of tickets, and tickets is incremental stream. + # But it does not save its own bookmark. It fetches records based on the record of the parent stream. 
+                # That's why we treat it as FULL_TABLE.
+                self.PRIMARY_KEYS: {"id"},
+                self.REPLICATION_METHOD: self.FULL_TABLE,
+                self.OBEYS_START_DATE: False
+            },
+            "tickets": {
+                self.PRIMARY_KEYS: {"id"},
+                self.REPLICATION_METHOD: self.INCREMENTAL,
+                self.REPLICATION_KEYS: {"generated_timestamp"},
+                self.OBEYS_START_DATE: True
+            },
+            "users": {
+                self.PRIMARY_KEYS: {"id"},
+                self.REPLICATION_METHOD: self.INCREMENTAL,
+                self.REPLICATION_KEYS: {"updated_at"},
+                self.OBEYS_START_DATE: True
+            },
+            "ticket_audits": {
+                # ticket_audits is a child stream of tickets, and tickets is an incremental stream.
+                # But it does not save its own bookmark; it fetches records based on the records of the parent stream.
+                # That's why we treat it as FULL_TABLE.
+                self.PRIMARY_KEYS: {"id"},
+                self.REPLICATION_METHOD: self.FULL_TABLE,
+                self.OBEYS_START_DATE: False
+            }
+        }
+
+    def expected_check_streams(self):
+        return set(self.expected_metadata().keys())
+
+    def expected_replication_keys(self):
+        return {table: properties.get(self.REPLICATION_KEYS, set()) for table, properties
+                in self.expected_metadata().items()}
+
+    def expected_primary_keys(self):
+        return {table: properties.get(self.PRIMARY_KEYS, set()) for table, properties
+                in self.expected_metadata().items()}
+
+    def expected_replication_method(self):
+        return {table: properties.get(self.REPLICATION_METHOD, set()) for table, properties
+                in self.expected_metadata().items()}
+
+    def expected_automatic_fields(self):
+        """Return a dictionary keyed by table name whose values are the sets of automatic fields (primary key and bookmark fields)."""
+        auto_fields = {}
+        for k, v in self.expected_metadata().items():
+            auto_fields[k] = v.get(self.PRIMARY_KEYS, set()) | v.get(self.REPLICATION_KEYS, set())
+        return auto_fields
+
+    def run_and_verify_check_mode(self, conn_id):
+        """
+        Run the tap in check mode and verify it succeeds.
+        This should be run prior to field selection and the initial sync.
+        Return the catalogs found by menagerie.
+        """
+        # run in check mode
+        check_job_name = runner.run_check_mode(self, conn_id)
+
+        # verify check exit codes
+        exit_status = menagerie.get_exit_status(conn_id, check_job_name)
+        menagerie.verify_check_exit_status(self, exit_status, check_job_name)
+
+        found_catalogs = menagerie.get_catalogs(conn_id)
+        self.assertGreater(len(found_catalogs), 0, msg="unable to locate schemas for connection {}".format(conn_id))
+
+        found_catalog_names = set(map(lambda c: c['stream_name'], found_catalogs))
+        self.assertSetEqual(self.expected_check_streams(), found_catalog_names, msg="discovered schemas do not match")
+        print("discovered schemas are OK")
+
+        return found_catalogs
+
+    def run_and_verify_sync(self, conn_id):
+        """Run a sync job, verify the exit status, and return the record count by stream."""
+        sync_job_name = runner.run_sync_mode(self, conn_id)
+
+        # verify tap and target exit codes
+        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
+        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)
+
+        sync_record_count = runner.examine_target_output_file(self,
+                                                              conn_id,
+                                                              self.expected_check_streams(),
+                                                              self.expected_primary_keys())
+
+        self.assertGreater(
+            sum(sync_record_count.values()), 0,
+            msg="failed to replicate any data: {}".format(sync_record_count)
+        )
+        print("total replicated row count: {}".format(sum(sync_record_count.values())))
+
+        return sync_record_count
+
+    def perform_and_verify_table_and_field_selection(self,
+                                                     conn_id,
+                                                     test_catalogs,
+                                                     select_all_fields=True):
+        """
+        Perform table and field selection based on the given set of streams
+        and field selection parameters.
+ Verify this results in the expected streams selected and all or no + fields selected for those streams. + """ + + # Select all available fields or select no fields from all testable streams + self.select_all_streams_and_fields( + conn_id=conn_id, catalogs=test_catalogs, select_all_fields=select_all_fields + ) + + catalogs = menagerie.get_catalogs(conn_id) + + # Ensure our selection affects the catalog + expected_selected = [tc.get('stream_name') for tc in test_catalogs] + for cat in catalogs: + catalog_entry = menagerie.get_annotated_schema(conn_id, cat['stream_id']) + + # Verify all testable streams are selected + selected = catalog_entry.get('annotated-schema').get('selected') + print("Validating selection on {}: {}".format(cat['stream_name'], selected)) + if cat['stream_name'] not in expected_selected: + self.assertFalse(selected, msg="Stream selected, but not testable.") + continue # Skip remaining assertions if we aren't selecting this stream + self.assertTrue(selected, msg="Stream not selected.") + + if select_all_fields: + # Verify all fields within each selected stream are selected + for field, field_props in catalog_entry.get('annotated-schema').get('properties').items(): + field_selected = field_props.get('selected') + print("\tValidating selection on {}.{}: {}".format( + cat['stream_name'], field, field_selected)) + self.assertTrue(field_selected, msg="Field not selected.") + else: + # Verify only automatic fields are selected + expected_automatic_fields = self.expected_automatic_fields().get(cat['stream_name']) + selected_fields = self.get_selected_fields_from_metadata(catalog_entry['metadata']) + self.assertEqual(expected_automatic_fields, selected_fields) + + @staticmethod + def get_selected_fields_from_metadata(metadata): + selected_fields = set() + for field in metadata: + is_field_metadata = len(field['breadcrumb']) > 1 + inclusion_automatic_or_selected = ( + field['metadata']['selected'] is True or \ + field['metadata']['inclusion'] == 'automatic' + ) + if is_field_metadata and inclusion_automatic_or_selected: + selected_fields.add(field['breadcrumb'][1]) + return selected_fields + + + @staticmethod + def select_all_streams_and_fields(conn_id, catalogs, select_all_fields: bool = True): + """Select all streams and all fields within streams""" + for catalog in catalogs: + schema = menagerie.get_annotated_schema(conn_id, catalog['stream_id']) + + non_selected_properties = [] + if not select_all_fields: + # get a list of all properties so that none are selected + non_selected_properties = schema.get('annotated-schema', {}).get( + 'properties', {}).keys() + + connections.select_catalog_and_fields_via_metadata( + conn_id, catalog, schema, [], non_selected_properties) + + def parse_date(self, date_value): + """ + Pass in string-formatted-datetime, parse the value, and return it as an unformatted datetime object. 
+        """
+        date_formats = {
+            "%Y-%m-%dT%H:%M:%S.%fZ",
+            "%Y-%m-%dT%H:%M:%SZ",
+            "%Y-%m-%dT%H:%M:%S.%f+00:00",
+            "%Y-%m-%dT%H:%M:%S+00:00",
+            "%Y-%m-%d"
+        }
+        for date_format in date_formats:
+            try:
+                date_stripped = dt.strptime(date_value, date_format)
+                return date_stripped
+            except ValueError:
+                continue
+
+        raise NotImplementedError(
+            "Tests do not account for dates of this format: {}".format(date_value))
+
+    def calculated_states_by_stream(self, current_state):
+        """Return a copy of each stream's bookmark shifted slightly into the past to simulate an earlier state between syncs."""
+        timedelta_by_stream = {stream: [0,1,1]  # {stream_name: [days, hours, minutes], ...}
+                               for stream in self.expected_check_streams()}
+
+        stream_to_calculated_state = {stream: "" for stream in current_state['bookmarks'].keys()}
+        for stream, state in current_state['bookmarks'].items():
+            state_key, state_value = next(iter(state.keys())), next(iter(state.values()))
+            state_as_datetime = dateutil.parser.parse(state_value)
+
+            days, hours, minutes = timedelta_by_stream[stream]
+            calculated_state_as_datetime = state_as_datetime - timedelta(days=days, hours=hours, minutes=minutes)
+
+            state_format = '%Y-%m-%dT%H:%M:%SZ'
+            calculated_state_formatted = dt.strftime(calculated_state_as_datetime, state_format)
+
+            stream_to_calculated_state[stream] = {state_key: calculated_state_formatted}
+
+        return stream_to_calculated_state
+
+    def timedelta_formatted(self, dtime, days=0):
+        try:
+            date_stripped = dt.strptime(dtime, self.START_DATE_FORMAT)
+            return_date = date_stripped + timedelta(days=days)
+
+            return dt.strftime(return_date, self.START_DATE_FORMAT)
+
+        except ValueError:
+            raise Exception("Datetime object is not of the format: {}".format(self.START_DATE_FORMAT))
+
+    def convert_state_to_utc(self, date_str):
+        """
+        Convert a saved bookmark value of the form '2020-08-25T13:17:36-07:00' to
+        a UTC datetime string, in order to compare against JSON-formatted datetime values.
+        """
+        date_object = dateutil.parser.parse(date_str)
+        date_object_utc = date_object.astimezone(tz=pytz.UTC)
+        return dt.strftime(date_object_utc, "%Y-%m-%dT%H:%M:%SZ")
diff --git a/test/test_all_fields.py b/test/test_all_fields.py
new file mode 100644
index 0000000..35dd876
--- /dev/null
+++ b/test/test_all_fields.py
@@ -0,0 +1,83 @@
+import tap_tester.connections as connections
+import tap_tester.runner as runner
+import tap_tester.menagerie as menagerie
+from base import ZendeskTest
+
+class ZendeskAllFields(ZendeskTest):
+    """Ensure running the tap with all streams and fields selected results in the replication of all fields."""
+
+    def name(self):
+        return "zendesk_all_fields"
+
+    def test_run(self):
+        """
+        • Verify no unexpected streams were replicated
+        • Verify that more than just the automatic fields are replicated for each stream.
+ • verify all fields for each stream are replicated + """ + + + # Streams to verify all fields tests + expected_streams = self.expected_check_streams() + + expected_automatic_fields = self.expected_automatic_fields() + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # table and field selection + test_catalogs_all_fields = [catalog for catalog in found_catalogs + if catalog.get('tap_stream_id') in expected_streams] + + self.perform_and_verify_table_and_field_selection( + conn_id, test_catalogs_all_fields) + + # grab metadata after performing table-and-field selection to set expectations + # used for asserting all fields are replicated + stream_to_all_catalog_fields = dict() + for catalog in test_catalogs_all_fields: + stream_id, stream_name = catalog['stream_id'], catalog['stream_name'] + catalog_entry = menagerie.get_annotated_schema(conn_id, stream_id) + fields_from_field_level_md = [md_entry['breadcrumb'][1] + for md_entry in catalog_entry['metadata'] + if md_entry['breadcrumb'] != []] + stream_to_all_catalog_fields[stream_name] = set( + fields_from_field_level_md) + + self.run_and_verify_sync(conn_id) + + synced_records = runner.get_records_from_target_output() + + # Verify no unexpected streams were replicated + synced_stream_names = set(synced_records.keys()) + self.assertSetEqual(expected_streams, synced_stream_names) + + for stream in expected_streams: + with self.subTest(stream=stream): + + # expected values + expected_all_keys = stream_to_all_catalog_fields[stream] + expected_automatic_keys = expected_automatic_fields.get( + stream, set()) + + # Verify that more than just the automatic fields are replicated for each stream. + self.assertTrue(expected_automatic_keys.issubset( + expected_all_keys), msg='{} is not in "expected_all_keys"'.format(expected_automatic_keys-expected_all_keys)) + + messages = synced_records.get(stream) + # collect actual values + actual_all_keys = set() + for message in messages['messages']: + if message['action'] == 'upsert': + actual_all_keys.update(message['data'].keys()) + + # As we can't generate following fields by zendesk APIs now so expected. + if stream == "ticket_fields": + expected_all_keys = expected_all_keys - {'system_field_options', 'sub_type_id'} + elif stream == "users": + expected_all_keys = expected_all_keys - {'permanently_deleted'} + elif stream == "ticket_metrics": + expected_all_keys = expected_all_keys - {'status', 'instance_id', 'metric', 'type', 'time'} + + # verify all fields for each stream are replicated + self.assertSetEqual(expected_all_keys, actual_all_keys) diff --git a/test/test_automatic_fields.py b/test/test_automatic_fields.py new file mode 100644 index 0000000..ab43c3a --- /dev/null +++ b/test/test_automatic_fields.py @@ -0,0 +1,66 @@ +import tap_tester.connections as connections +import tap_tester.runner as runner +from base import ZendeskTest + +class ZendeskAutomaticFields(ZendeskTest): + """ + Ensure running the tap with all streams selected and all fields deselected results in the replication of just the + primary keys and replication keys (automatic fields). + """ + + def name(self): + return "zendesk_automatic_fields" + + def test_run(self): + """ + Verify we can deselect all fields except when inclusion=automatic, which is handled by base.py methods + Verify that only the automatic fields are sent to the target. + Verify that all replicated records have unique primary key values. 
+ """ + + streams_to_test = self.expected_check_streams() + + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # table and field selection + test_catalogs_automatic_fields = [catalog for catalog in found_catalogs + if catalog.get('tap_stream_id') in streams_to_test] + + # Select all streams and no fields within streams + self.perform_and_verify_table_and_field_selection( + conn_id, test_catalogs_automatic_fields, select_all_fields=False) + + record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + + for stream in streams_to_test: + with self.subTest(stream=stream): + + # expected values + expected_keys = self.expected_automatic_fields().get(stream) + expected_primary_keys = self.expected_primary_keys()[stream] + + # collect actual values + data = synced_records.get(stream, {}) + record_messages_keys = [set(row['data'].keys()) + for row in data.get('messages', [])] + primary_keys_list = [tuple(message.get('data', {}).get(expected_pk) for expected_pk in expected_primary_keys) + for message in data.get('messages', []) + if message.get('action') == 'upsert'] + unique_primary_keys_list = set(primary_keys_list) + + # Verify that you get some records for each stream + self.assertGreater( + record_count_by_stream.get(stream, -1), 0, + msg="The number of records is not over the stream min limit") + + # Verify that only the automatic fields are sent to the target + for actual_keys in record_messages_keys: + self.assertSetEqual(expected_keys, actual_keys) + + #Verify that all replicated records have unique primary key values. + self.assertEqual(len(primary_keys_list), + len(unique_primary_keys_list), + msg="Replicated record does not have unique primary key values.") \ No newline at end of file diff --git a/test/test_discovery.py b/test/test_discovery.py new file mode 100644 index 0000000..ba936a5 --- /dev/null +++ b/test/test_discovery.py @@ -0,0 +1,129 @@ +import re + +import tap_tester.connections as connections +from base import ZendeskTest +from tap_tester import menagerie + +class ZendeskDiscover(ZendeskTest): + """ + Testing that discovery creates the appropriate catalog with valid metadata. + • Verify number of actual streams discovered match expected + • Verify the stream names discovered were what we expect + • Verify stream names follow naming convention + streams should only have lowercase alphas and underscores + • verify there is only 1 top level breadcrumb + • verify replication key(s) + • verify primary key(s) + • verify that if there is a replication key we are doing INCREMENTAL otherwise FULL + • verify the actual replication matches our expected replication method + • verify that primary, replication keys are given the inclusion of automatic. + • verify that all other fields have inclusion of available metadata. 
+    """
+
+    def name(self):
+        return "zendesk_discover_test"
+
+    def test_run(self):
+        streams_to_test = self.expected_check_streams()
+
+        conn_id = connections.ensure_connection(self, payload_hook=None)
+
+        # Verify that there are catalogs found
+        found_catalogs = self.run_and_verify_check_mode(
+            conn_id)
+
+        # Verify stream names follow naming convention
+        # streams should only have lowercase alphas and underscores
+        found_catalog_names = {c['tap_stream_id'] for c in found_catalogs}
+        self.assertTrue(all([re.fullmatch(r"[a-z_]+", name) for name in found_catalog_names]),
+                        msg="One or more streams don't follow standard naming")
+
+        for stream in streams_to_test:
+            with self.subTest(stream=stream):
+
+                # Verify the catalog is found for a given stream
+                catalog = next(iter([catalog for catalog in found_catalogs
+                                     if catalog["stream_name"] == stream]))
+                self.assertIsNotNone(catalog)
+
+                # collecting expected values
+                expected_primary_keys = self.expected_primary_keys()[stream]
+                expected_replication_keys = self.expected_replication_keys()[
+                    stream]
+                expected_automatic_fields = self.expected_automatic_fields().get(stream)
+                expected_replication_method = self.expected_replication_method()[
+                    stream]
+
+                # collecting actual values...
+                schema_and_metadata = menagerie.get_annotated_schema(
+                    conn_id, catalog['stream_id'])
+                metadata = schema_and_metadata["metadata"]
+                stream_properties = [
+                    item for item in metadata if item.get("breadcrumb") == []]
+                actual_primary_keys = set(
+                    stream_properties[0].get(
+                        "metadata", {self.PRIMARY_KEYS: []}).get(self.PRIMARY_KEYS, [])
+                )
+                actual_replication_keys = set(
+                    stream_properties[0].get(
+                        "metadata", {self.REPLICATION_KEYS: []}).get(self.REPLICATION_KEYS, [])
+                )
+                actual_replication_method = stream_properties[0].get(
+                    "metadata", {self.REPLICATION_METHOD: None}).get(self.REPLICATION_METHOD)
+                actual_automatic_fields = set(
+                    item.get("breadcrumb", ["properties", None])[1] for item in metadata
+                    if item.get("metadata").get("inclusion") == "automatic"
+                )
+
+                ##########################################################################
+                # metadata assertions
+                ##########################################################################
+
+                # verify there is only 1 top level breadcrumb in metadata
+                self.assertTrue(len(stream_properties) == 1,
+                                msg="There is NOT only one top level breadcrumb for {}".format(stream) +
+                                "\nstream_properties | {}".format(stream_properties))
+
+                # verify primary key(s) match expectations
+                self.assertSetEqual(
+                    expected_primary_keys, actual_primary_keys,
+                )
+
+                # verify that primary keys and replication keys
+                # are given the inclusion of automatic in metadata.
+                self.assertSetEqual(expected_automatic_fields,
+                                    actual_automatic_fields)
+
+                # verify that all other fields have inclusion of available
+                # This assumes there are no unsupported fields for SaaS sources
+                self.assertTrue(
+                    all({item.get("metadata").get("inclusion") == "available"
+                         for item in metadata
+                         if item.get("breadcrumb", []) != []
+                         and item.get("breadcrumb", ["properties", None])[1]
+                         not in actual_automatic_fields}),
+                    msg="Not all non key properties are set to available in metadata")
+
+                # verify that if there is a replication key we are doing INCREMENTAL, otherwise FULL
+                # The streams below are child streams of the parent stream `tickets`, and tickets is an incremental stream,
+                # so the child streams also behave as incremental streams but do not save their own state. So, skipping them.
+ if not stream in ["ticket_comments", "ticket_audits", "ticket_metrics"]: + + if actual_replication_keys: + self.assertTrue(actual_replication_method == self.INCREMENTAL, + msg="Expected INCREMENTAL replication " + "since there is a replication key") + else: + self.assertTrue(actual_replication_method == self.FULL_TABLE, + msg="Expected FULL replication " + "since there is no replication key") + + # verify the actual replication matches our expected replication method + self.assertEqual(expected_replication_method, actual_replication_method, + msg="The actual replication method {} doesn't match the expected {}".format( + actual_replication_method, expected_replication_method)) + + # verify replication key(s) + self.assertEqual(expected_replication_keys, actual_replication_keys, + msg="expected replication key {} but actual is {}".format( + expected_replication_keys, actual_replication_keys)) diff --git a/test/test_pagination.py b/test/test_pagination.py new file mode 100644 index 0000000..3b0cf8a --- /dev/null +++ b/test/test_pagination.py @@ -0,0 +1,70 @@ +import tap_tester.connections as connections +import tap_tester.runner as runner +import tap_tester.menagerie as menagerie +from base import ZendeskTest + + +class ZendeskPagination(ZendeskTest): + """ + Ensure tap can replicate multiple pages of data for streams that use pagination. + """ + API_LIMIT = 100 + def name(self): + return "zendesk_pagination_test" + + def test_run(self): + """ + • Verify that for each stream you can get multiple pages of data. + This requires we ensure more than 1 page of data exists at all times for any given stream. + • Verify by pks that the data replicated matches the data we expect. + + """ + + # Streams to verify all fields tests + expected_streams = self.expected_check_streams() + #Skip satisfaction_ratings streams as only end user of tickets can create satisfaction_ratings + expected_streams = expected_streams - {"satisfaction_ratings"} + + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # table and field selection + test_catalogs_all_fields = [catalog for catalog in found_catalogs + if catalog.get('tap_stream_id') in expected_streams] + + self.perform_and_verify_table_and_field_selection( + conn_id, test_catalogs_all_fields) + + record_count_by_stream = self.run_and_verify_sync(conn_id) + + synced_records = runner.get_records_from_target_output() + + # Verify no unexpected streams were replicated + synced_stream_names = set(synced_records.keys()) + self.assertSetEqual(expected_streams, synced_stream_names) + + for stream in expected_streams: + with self.subTest(stream=stream): + + # expected values + expected_primary_keys = self.expected_primary_keys()[stream] + + # verify that we can paginate with all fields selected + record_count_sync = record_count_by_stream.get(stream, 0) + self.assertGreater(record_count_sync, self.API_LIMIT, + msg="The number of records is not over the stream max limit") + + primary_keys_list = [tuple([message.get('data').get(expected_pk) for expected_pk in expected_primary_keys]) + for message in synced_records.get(stream).get('messages') + if message.get('action') == 'upsert'] + + primary_keys_list_1 = primary_keys_list[:self.API_LIMIT] + primary_keys_list_2 = primary_keys_list[self.API_LIMIT:2*self.API_LIMIT] + + primary_keys_page_1 = set(primary_keys_list_1) + primary_keys_page_2 = set(primary_keys_list_2) + + # Verify by primary keys that data is unique for page + self.assertTrue( + 
primary_keys_page_1.isdisjoint(primary_keys_page_2)) diff --git a/test/test_standard_bookmark.py b/test/test_standard_bookmark.py new file mode 100644 index 0000000..cb3d90e --- /dev/null +++ b/test/test_standard_bookmark.py @@ -0,0 +1,196 @@ +import tap_tester.connections as connections +import tap_tester.runner as runner +from base import ZendeskTest +from tap_tester import menagerie +from datetime import datetime +import uuid +import os +import time +from zenpy import Zenpy +from zenpy.lib.api_objects import User + +class ZendeskBookMark(ZendeskTest): + """Test tap sets a bookmark and respects it for the next sync of a stream""" + + def name(self): + return "zendesk_bookmark_test" + + def test_run(self): + """ + Verify that for each stream you can do a sync which records bookmarks. + That the bookmark is the maximum value sent to the target for the replication key. + That a second sync respects the bookmark + All data of the second sync is >= the bookmark from the first sync + The number of records in the 2nd sync is less then the first (This assumes that + new data added to the stream is done at a rate slow enough that you haven't + doubled the amount of data from the start date to the first sync between + the first sync and second sync run in this test) + + Verify that for full table stream, all data replicated in sync 1 is replicated again in sync 2. + + PREREQUISITE + For EACH stream that is incrementally replicated there are multiple rows of data with + different values for the replication key + """ + + + expected_streams = self.expected_check_streams() + + expected_replication_keys = self.expected_replication_keys() + expected_replication_methods = self.expected_replication_method() + + ########################################################################## + # First Sync + ########################################################################## + conn_id = connections.ensure_connection(self) + + # Run in check mode + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # table and field selection + catalog_entries = [catalog for catalog in found_catalogs + if catalog.get('tap_stream_id') in expected_streams] + + self.perform_and_verify_table_and_field_selection( + conn_id, catalog_entries) + + # Run a first sync job using orchestrator + first_sync_record_count = self.run_and_verify_sync(conn_id) + first_sync_records = runner.get_records_from_target_output() + first_sync_bookmarks = menagerie.get_state(conn_id) + + ########################################################################## + # Update State Between Syncs + ########################################################################## + + new_states = {'bookmarks': dict()} + simulated_states = self.calculated_states_by_stream( + first_sync_bookmarks) + for stream, new_state in simulated_states.items(): + new_states['bookmarks'][stream] = new_state + menagerie.set_state(conn_id, new_states) + + ########################################################################## + # Second Sync + ########################################################################## + + second_sync_record_count = self.run_and_verify_sync(conn_id) + second_sync_records = runner.get_records_from_target_output() + second_sync_bookmarks = menagerie.get_state(conn_id) + + ########################################################################## + # Test By Stream + ########################################################################## + + + for stream in expected_streams: + with self.subTest(stream=stream): + + # expected values + 
expected_replication_method = expected_replication_methods[stream] + + # collect information for assertions from syncs 1 & 2 base on expected values + first_sync_count = first_sync_record_count.get(stream, 0) + second_sync_count = second_sync_record_count.get(stream, 0) + first_sync_messages = [record.get('data') for record in + first_sync_records.get( + stream, {}).get('messages', []) + if record.get('action') == 'upsert'] + second_sync_messages = [record.get('data') for record in + second_sync_records.get( + stream, {}).get('messages', []) + if record.get('action') == 'upsert'] + first_bookmark_key_value = first_sync_bookmarks.get('bookmarks', {stream: None}).get(stream) + second_bookmark_key_value = second_sync_bookmarks.get('bookmarks', {stream: None}).get(stream) + + + if expected_replication_method == self.INCREMENTAL: + + # collect information specific to incremental streams from syncs 1 & 2 + replication_key = next( + iter(expected_replication_keys[stream])) + first_bookmark_value = first_bookmark_key_value.get(replication_key) + second_bookmark_value = second_bookmark_key_value.get(replication_key) + first_bookmark_value_utc = self.convert_state_to_utc( + first_bookmark_value) + second_bookmark_value_utc = self.convert_state_to_utc( + second_bookmark_value) + + + simulated_bookmark_value = self.convert_state_to_utc(new_states['bookmarks'][stream][replication_key]) + + # Verify the first sync sets a bookmark of the expected form + self.assertIsNotNone(first_bookmark_key_value) + self.assertIsNotNone(first_bookmark_value) + + # Verify the second sync sets a bookmark of the expected form + self.assertIsNotNone(second_bookmark_key_value) + self.assertIsNotNone(second_bookmark_value) + + # Verify the second sync bookmark is Equal to the first sync bookmark + # assumes no changes to data during test + if not stream == "users": + self.assertEqual(second_bookmark_value, + first_bookmark_value) + else: + # For `users` stream it stores bookmark as 1 minute less than current time if `updated_at` of + # last records less than it. So, if there is no data change then second_bookmark_value will be + # 1 minute less than current time. Therefore second_bookmark_value will always be + # greater or equal to first_bookmark_value + self.assertGreaterEqual(second_bookmark_value, + first_bookmark_value) + + for record in first_sync_messages: + + # Verify the first sync bookmark value is the max replication key value for a given stream + replication_key_value = record.get(replication_key) + # For `ticket` stream it stores bookmarks as int timestamp. So, converting it to the string. + if stream == "tickets": + replication_key_value = datetime.utcfromtimestamp(replication_key_value).strftime('%Y-%m-%dT%H:%M:%SZ') + + self.assertLessEqual( + replication_key_value, first_bookmark_value_utc, + msg="First sync bookmark was set incorrectly, a record with a greater replication-key value was synced." 
+                        )
+
+                    for record in second_sync_messages:
+                        # Verify the second sync replication key value is Greater or Equal to the first sync bookmark
+                        replication_key_value = record.get(replication_key)
+
+                        if stream == "tickets":
+                            replication_key_value = datetime.utcfromtimestamp(replication_key_value).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+                        self.assertGreaterEqual(replication_key_value, simulated_bookmark_value,
+                                                msg="Second sync records do not respect the previous bookmark.")
+
+                        # Verify the second sync bookmark value is the max replication key value for a given stream
+                        self.assertLessEqual(
+                            replication_key_value, second_bookmark_value_utc,
+                            msg="Second sync bookmark was set incorrectly, a record with a greater replication-key value was synced."
+                        )
+
+                elif expected_replication_method == self.FULL_TABLE:
+
+                    # Verify the syncs do not set a bookmark for full table streams
+                    self.assertIsNone(first_bookmark_key_value)
+                    self.assertIsNone(second_bookmark_key_value)
+
+                    # Verify the number of records in the second sync is the same as the first
+
+                    # The streams below are child streams of the parent stream `tickets`, and tickets is an incremental stream.
+                    # The child streams also behave like incremental streams but do not save their own state, so they do not
+                    # necessarily have the same number of records in the first and second syncs.
+                    if not stream in ["ticket_comments", "ticket_audits", "ticket_metrics"]:
+                        self.assertEqual(second_sync_count, first_sync_count)
+
+                else:
+
+                    raise NotImplementedError(
+                        "INVALID EXPECTATIONS\t\tSTREAM: {} REPLICATION_METHOD: {}".format(
+                            stream, expected_replication_method)
+                    )
+
+                # Verify at least 1 record was replicated in the second sync
+                self.assertGreater(
+                    second_sync_count, 0, msg="We are not fully testing bookmarking for {}".format(stream))
+
diff --git a/test/test_start_date.py b/test/test_start_date.py
new file mode 100644
index 0000000..0042da3
--- /dev/null
+++ b/test/test_start_date.py
@@ -0,0 +1,165 @@
+import tap_tester.connections as connections
+import tap_tester.runner as runner
+from base import ZendeskTest
+from datetime import datetime
+
+class ZendeskStartDate(ZendeskTest):
+    """
+    Ensure all expected streams respect the start date. Run the tap in check mode,
+    run a 1st sync with an earlier start date, then run check mode and a 2nd sync on a new connection with a later start date.
+ """ + + + start_date_1 = "" + start_date_2 = "" + + def name(self): + return "zendesk_start_date_test" + + def test_run(self): + """ + Test that the start_date configuration is respected + • verify that a sync with a later start date has at least one record synced + and less records than the 1st sync with a previous start date + • verify that each stream has less records than the earlier start date sync + • verify all data from later start data has bookmark values >= start_date + """ + self.run_test(days=1172, expected_streams=self.expected_check_streams()-{"ticket_forms"}) + self.run_test(days=1774, expected_streams={"ticket_forms"}) + + def run_test(self, days, expected_streams): + self.start_date_1 = self.get_properties().get('start_date') + self.start_date_2 = self.timedelta_formatted(self.start_date_1, days=days) + self.start_date = self.start_date_1 + + expected_streams = expected_streams + + ########################################################################## + # First Sync + ########################################################################## + + # instantiate connection + conn_id_1 = connections.ensure_connection(self) + + # run check mode + found_catalogs_1 = self.run_and_verify_check_mode(conn_id_1) + + # table and field selection + test_catalogs_1_all_fields = [catalog for catalog in found_catalogs_1 + if catalog.get('tap_stream_id') in expected_streams] + self.perform_and_verify_table_and_field_selection( + conn_id_1, test_catalogs_1_all_fields, select_all_fields=True) + + # run initial sync + record_count_by_stream_1 = self.run_and_verify_sync(conn_id_1) + synced_records_1 = runner.get_records_from_target_output() + + ########################################################################## + # Update START DATE Between Syncs + ########################################################################## + + print("REPLICATION START DATE CHANGE: {} ===>>> {} ".format( + self.start_date, self.start_date_2)) + self.start_date = self.start_date_2 + + ########################################################################## + # Second Sync + ########################################################################## + + # create a new connection with the new start_date + conn_id_2 = connections.ensure_connection( + self, original_properties=False) + + # run check mode + found_catalogs_2 = self.run_and_verify_check_mode(conn_id_2) + + # table and field selection + test_catalogs_2_all_fields = [catalog for catalog in found_catalogs_2 + if catalog.get('tap_stream_id') in expected_streams] + self.perform_and_verify_table_and_field_selection( + conn_id_2, test_catalogs_2_all_fields, select_all_fields=True) + + # run sync + record_count_by_stream_2 = self.run_and_verify_sync(conn_id_2) + synced_records_2 = runner.get_records_from_target_output() + + for stream in expected_streams: + with self.subTest(stream=stream): + + # expected values + expected_primary_keys = self.expected_primary_keys()[stream] + + # collect information for assertions from syncs 1 & 2 base on expected values + record_count_sync_1 = record_count_by_stream_1.get(stream, 0) + record_count_sync_2 = record_count_by_stream_2.get(stream, 0) + + primary_keys_list_1 = [tuple(message.get('data').get(expected_pk) for expected_pk in expected_primary_keys) + for message in synced_records_1.get(stream, {}).get('messages', []) + if message.get('action') == 'upsert'] + primary_keys_list_2 = [tuple(message.get('data').get(expected_pk) for expected_pk in expected_primary_keys) + for message in 
synced_records_2.get(stream, {}).get('messages', [])
+                                       if message.get('action') == 'upsert']
+
+                primary_keys_sync_1 = set(primary_keys_list_1)
+                primary_keys_sync_2 = set(primary_keys_list_2)
+
+                if self.expected_metadata()[stream][self.OBEYS_START_DATE]:
+
+                    # collect information specific to incremental streams from syncs 1 & 2
+                    expected_replication_key = next(
+                        iter(self.expected_replication_keys().get(stream, [])))
+                    replication_dates_1 = [row.get('data').get(expected_replication_key) for row in
+                                           synced_records_1.get(stream, {'messages': []}).get('messages', [])
+                                           if row.get('data')]
+                    replication_dates_2 = [row.get('data').get(expected_replication_key) for row in
+                                           synced_records_2.get(stream, {'messages': []}).get('messages', [])
+                                           if row.get('data')]
+
+                    # Verify the replication key is greater than or equal to start_date for sync 1
+                    for replication_date in replication_dates_1:
+                        if stream == "tickets":
+                            replication_date = datetime.utcfromtimestamp(replication_date).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+                        self.assertGreaterEqual(
+                            self.parse_date(replication_date), self.parse_date(
+                                self.start_date_1),
+                            msg="Report pertains to a date prior to our start date.\n" +
+                            "Sync start_date: {}\n".format(self.start_date_1) +
+                            "Record date: {} ".format(replication_date)
+                        )
+
+                    # Verify the replication key is greater than or equal to start_date for sync 2
+                    for replication_date in replication_dates_2:
+                        if stream == "tickets":
+                            replication_date = datetime.utcfromtimestamp(replication_date).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+                        self.assertGreaterEqual(
+                            self.parse_date(replication_date), self.parse_date(
+                                self.start_date_2),
+                            msg="Report pertains to a date prior to our start date.\n" +
+                            "Sync start_date: {}\n".format(self.start_date_2) +
+                            "Record date: {} ".format(replication_date)
+                        )
+
+                    # Verify the number of records replicated in sync 1 is greater than the number
+                    # of records replicated in sync 2
+                    self.assertGreater(record_count_sync_1,
+                                       record_count_sync_2)
+
+                    # Verify the records replicated in sync 2 were also replicated in sync 1
+                    self.assertTrue(
+                        primary_keys_sync_2.issubset(primary_keys_sync_1))
+
+                else:
+                    # The streams below are child streams of the parent stream `tickets`, and tickets is an incremental stream.
+                    # The child streams also behave like incremental streams but do not save their own state, so they do not
+                    # necessarily have the same number of records in the first and second syncs.
+
+                    # Verify that the 2nd sync with a later start date replicates the same number of
+                    # records as the 1st sync.
+                    if not stream in ["ticket_comments", "ticket_audits", "ticket_metrics"]:
+                        self.assertEqual(record_count_sync_2, record_count_sync_1)
+
+                    # Verify by primary key the same records are replicated in the 1st and 2nd syncs
+                    self.assertSetEqual(primary_keys_sync_1,
+                                        primary_keys_sync_2)
\ No newline at end of file
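
As an illustration of how the shared ZendeskTest helpers added in test/base.py are meant to compose, a minimal sketch follows; it mirrors the check -> select -> sync flow that every test above uses. It is not part of this change: it assumes the same tap-tester environment and TAP_ZENDESK_* credentials the suite already requires, and the ZendeskSmokeTest class name is hypothetical.

import tap_tester.connections as connections
from base import ZendeskTest

class ZendeskSmokeTest(ZendeskTest):
    """Hypothetical example composing the shared base-class helpers."""

    def name(self):
        return "zendesk_smoke_test"

    def test_run(self):
        conn_id = connections.ensure_connection(self)

        # Discovery: check mode must succeed and find every expected stream.
        found_catalogs = self.run_and_verify_check_mode(conn_id)

        # Selection: select all testable streams with all of their fields.
        test_catalogs = [catalog for catalog in found_catalogs
                         if catalog.get('tap_stream_id') in self.expected_check_streams()]
        self.perform_and_verify_table_and_field_selection(conn_id, test_catalogs)

        # Sync: every selected stream should replicate at least one record.
        record_count_by_stream = self.run_and_verify_sync(conn_id)
        for stream in self.expected_check_streams():
            with self.subTest(stream=stream):
                self.assertGreater(record_count_by_stream.get(stream, 0), 0)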