diff --git a/.gitignore b/.gitignore
index cd07563..2b5b803 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,9 +2,11 @@
 venv/
 *.pyc
 *.egg-info/
+*wheel*
 .pytest*
 .coverage
 *cov/
 dist/
 docs/_build/
 .DS*
+django-queries-results.html
diff --git a/README.md b/README.md
index eb939fe..dccde40 100644
--- a/README.md
+++ b/README.md
@@ -61,6 +61,8 @@ You will find the [full documentation here](https://pytest-django-queries.readth
 
 ## Integrating with GitHub
 
+TBA.
+
 ## Testing locally
 Simply install `pytest-django-queries` through pip and run your
 tests using `pytest`. A report should have been generated in your
@@ -100,12 +102,20 @@ You will get something like this to represent the results:
 
 ## Exporting the results (HTML)
 For a nicer presentation, use the `html` command, to export the results as HTML.
 ```shell
-django-queries html > results.html
+django-queries html
 ```
-
+
+It will generate something [like this](docs/extra_html/html_export_results.html).
 
 ## Comparing results
 
+When running pytest, pass the `--django-backup-queries` flag (it optionally takes a path).
+Then you can run `django-queries diff` to generate results looking like this:
+
+
+  screenshot
+
+
 ## Development
 First of all, clone the project locally. Then, install it using the below command.
diff --git a/docs/diff.rst b/docs/diff.rst
index e9259b5..4b712a7 100644
--- a/docs/diff.rst
+++ b/docs/diff.rst
@@ -2,3 +2,11 @@
 
 The Diff Command
 ----------------
+
+The plugin can back up the test results for you if you pass the ``--django-backup-queries [BACKUP_PATH]`` parameter to it. If previous results are found, it will create a backup at ``.pytest-query.old`` by default.
+
+.. warning::
+
+   Bear in mind that it will overwrite any existing backup file at the provided or default path.
+
+After running ``pytest --django-backup-queries``, you can run ``django-queries diff`` to show the changes. Make sure you actually had previous results; otherwise there will be nothing to compare against.
diff --git a/docs/index.rst b/docs/index.rst
index 680594d..219c407 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -29,7 +29,8 @@ Quick Start
     def test_another_query_performances(count_queries):
         Model.objects.all()
 
-3. Then use ``django-queries show`` to show the results directly into your console:
+3. Run ``pytest``;
+4. Then use ``django-queries show`` to show the results directly in your console:
 
 .. code-block:: text
 
@@ -53,13 +54,13 @@ Quick Start
    | module3 |                         |
   +---------+-------------------------+
 
-4. Or for a nicer presentation, use ``django-queries html > results.html`` to export the results as HTML. See `this example <./html_export_results.html>`_ for a demo!
+5. Or for a nicer presentation, use ``django-queries html`` to export the results as HTML. See `this example <./html_export_results.html>`_ for a demo!
 
 .. image:: _static/html_export_results.png
    :width: 500 px
   :align: center
 
-5. By running it twice with the option described :ref:`here ` and by running ``django-queries diff`` you will get something like this:
+6. By running it twice with the option described :ref:`here ` and by running ``django-queries diff`` you will get something like this:
 
 .. image:: _static/diff_results.png
    :width: 500 px
diff --git a/docs/usage.rst b/docs/usage.rst
index 6dc54cd..b1950b2 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -1,3 +1,30 @@
+Plugin Usage
+============
+
+The plugin supports some optional parameters that are defined below.
+
+Customizing the Save Path
++++++++++++++++++++++++++
+
+.. code-block:: text
+
+    --django-db-bench=PATH
+                  Output file for storing the results. Default: .pytest-queries
+
+
+Backing Up Results
+++++++++++++++++++
+
+You can pass the ``--django-backup-queries`` parameter to back up previous results to ``.pytest-django.old``.
+
+You can also pass a custom path.
+
+.. code-block:: text
+
+    --django-backup-queries=[PATH]
+                  Whether the old results should be backed up or not before overriding.
+
+
 CLI Usage
 =========
 
@@ -20,11 +47,14 @@ The HTML Command
 
 .. code-block:: text
 
-    Usage: django-queries html [OPTIONS] [INPUT_FILE]
+    Usage: django-queries html [OPTIONS] [INPUT_FILE] [-o OUTPUT_FILE]
 
       Render the results as HTML instead of a raw table.
 
     Options:
+      -o                The path to save the HTML file to
+                        (django-queries-results.html by default).
+                        You can pass a dash (-) to write to stdout instead.
       --template INTEGER
       --help     Show this message and exit.
 
diff --git a/pytest_django_queries/cli.py b/pytest_django_queries/cli.py
index 534d0ed..c986d2b 100644
--- a/pytest_django_queries/cli.py
+++ b/pytest_django_queries/cli.py
@@ -12,15 +12,21 @@
     DEFAULT_OLD_RESULT_FILENAME,
     DEFAULT_RESULT_FILENAME,
 )
-from pytest_django_queries.tables import print_entries, print_entries_as_html
+from pytest_django_queries.tables import entries_to_html, print_entries
 
 HERE = dirname(__file__)
 DEFAULT_TEMPLATE_PATH = abspath(pathjoin(HERE, "templates", "default_bootstrap.jinja2"))
+DEFAULT_HTML_SAVE_PATH = "django-queries-results.html"
 DIFF_TERM_COLOR = {"-": "red", "+": "green"}
 DEFAULT_TERM_DIFF_COLOR = None
 
 
+def _write_html_to_file(content, path):
+    with open(path, "w") as fp:
+        fp.write(content)
+
+
 class JsonFileParamType(click.File):
     name = "integer"
 
@@ -41,7 +47,7 @@ class Jinja2TemplateFile(click.File):
     def convert(self, value, param, ctx):
         fp = super(Jinja2TemplateFile, self).convert(value, param, ctx)
         try:
-            return Template(fp.read())
+            return Template(fp.read(), trim_blocks=True)
         except jinja_exceptions.TemplateError as e:
             self.fail(
                 "The file is not a valid jinja2 template: %s" % str(e), param, ctx
@@ -66,10 +72,20 @@ def show(input_file):
 @click.argument(
     "input_file", type=JsonFileParamType("r"), default=DEFAULT_RESULT_FILENAME
 )
+@click.option("-o", "--output", type=str, default=DEFAULT_HTML_SAVE_PATH)
 @click.option("--template", type=Jinja2TemplateFile("r"), default=DEFAULT_TEMPLATE_PATH)
-def html(input_file, template):
-    """Render the results as HTML instead of a raw table."""
-    return print_entries_as_html(input_file, template)
+def html(input_file, output, template):
+    """
+    Render the results as HTML instead of a raw table.
+
+    Note: you can pass a dash (-) as the path to print the HTML content to stdout."""
+    html_content = entries_to_html(input_file, template)
+
+    if output == "-":
+        click.echo(html_content, nl=False)
+        return
+
+    _write_html_to_file(html_content, output)
 
 
 @main.command()
diff --git a/pytest_django_queries/plugin.py b/pytest_django_queries/plugin.py
index 55249ff..c8f05b4 100644
--- a/pytest_django_queries/plugin.py
+++ b/pytest_django_queries/plugin.py
@@ -1,4 +1,6 @@
 import json
+import shutil
+from os.path import isfile
 
 import pytest
 from django.test.utils import CaptureQueriesContext
@@ -17,9 +19,21 @@ def _get_session(request):
     return request.config.pytest_django_queries_session
 
 
+def _create_backup(save_path, backup_path):
+    shutil.copy(save_path, backup_path)
+
+
 class _Session(object):
-    def __init__(self, save_path):
+    def __init__(self, save_path, backup_path):
+        """
+        :param save_path: The path to store the results into.
+        :type save_path: str
+
+        :param backup_path: The path to back up the previous results to, if requested.
+        :type backup_path: str or None
+        """
         self.save_path = save_path
+        self.backup_path = backup_path
         self._data = {}
 
     def add_entry(self, module_name, test_name, query_count):
@@ -27,6 +41,9 @@ def add_entry(self, module_name, test_name, query_count):
         module_data[test_name] = {"query-count": query_count}
 
     def save_json(self):
+        if self.backup_path and isfile(self.save_path):
+            _create_backup(self.save_path, self.backup_path)
+
         with open(self.save_path, "w") as fp:
             json.dump(self._data, fp, indent=2)
 
@@ -49,6 +66,15 @@ def pytest_addoption(parser):
         metavar="PATH",
         help="Output file for storing the results. Default: .pytest-queries",
     )
+    group.addoption(
+        "--django-backup-queries",
+        dest="queries_backup_results",
+        action="store",
+        default=None,
+        metavar="PATH",
+        help="Whether the old results should be backed up or not before overriding",
+        nargs="?",
+    )
 
 
 @pytest.mark.tryfirst
@@ -63,10 +89,21 @@ def pytest_configure(config):
 
 @pytest.mark.tryfirst
 def pytest_load_initial_conftests(early_config, parser, args):
-    _set_session(
-        early_config,
-        _Session(early_config.known_args_namespace.queries_results_save_path),
-    )
+    """
+    :param early_config: The pytest early config object.
+    :param parser: The pytest argument parser.
+    :param args: The raw pytest invocation arguments.
+    :type args: tuple|list
+    :return: None
+    """
+    save_path = early_config.known_args_namespace.queries_results_save_path
+    backup_path = early_config.known_args_namespace.queries_backup_results
+
+    # Use the default backup path if the flag was provided without a value
+    if backup_path is None and "--django-backup-queries" in args:
+        backup_path = DEFAULT_OLD_RESULT_FILENAME
+
+    _set_session(early_config, _Session(save_path, backup_path))
 
 
 @pytest.hookimpl(hookwrapper=True)
diff --git a/pytest_django_queries/tables.py b/pytest_django_queries/tables.py
index fa4824d..cf6b585 100644
--- a/pytest_django_queries/tables.py
+++ b/pytest_django_queries/tables.py
@@ -17,8 +17,8 @@ def print_entries(data):
         click.echo(table)
 
 
-def print_entries_as_html(data, template):
+def entries_to_html(data, template):
     html_content = template.render(
         data=iter_entries(data), humanize=format_underscore_name_to_human
     )
-    click.echo(html_content, nl=False)
+    return html_content
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 742205b..1f52bb0 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -3,3 +3,4 @@ lxml
 sphinx
 sphinx_rtd_theme
 pre-commit
+mock
diff --git a/setup.cfg b/setup.cfg
index da69665..af7285d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -58,4 +58,4 @@ include_trailing_comma: True
 line_length = 88
 
 known_first_party = pytest_django_queries
-known_third_party =beautifultable,bs4,click,django,jinja2,pytest,setuptools
+known_third_party =beautifultable,bs4,click,django,jinja2,mock,pytest,setuptools
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6601f52..a9d5881 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,6 +1,7 @@
 import json
 from textwrap import dedent
 
+import mock
 import pytest
 from bs4 import BeautifulSoup
 from click.testing import CliRunner
@@ -99,7 +100,7 @@ def test_load_valid_json_file_shows_correct_data(testdir):
 def test_load_valid_json_file_shows_correct_html_data(testdir):
     testdir.makefile(".json", test_file=json.dumps(VALID_DATA))
     runner = CliRunner()
-    result = runner.invoke(cli.main, ["html", "test_file.json"])
+    result = runner.invoke(cli.main, ["html", "test_file.json", "-o", "-"])
     assert result.exit_code == 0, result.stdout
     soup = BeautifulSoup(result.stdout, "lxml")
     sections = soup.select("section")
@@ -130,7 +131,7 @@ def test_load_valid_json_without_data_is_empty_result(testdir, test_data):
     testdir.makefile(".json", test_file=json.dumps(test_data))
 
     runner = CliRunner()
-    result = runner.invoke(cli.main, ["html", "test_file.json"])
+    result = runner.invoke(cli.main, ["html", "test_file.json", "-o", "-"])
     assert result.exit_code == 0, result.stdout
 
     soup = BeautifulSoup(result.stdout, "lxml")
@@ -162,12 +163,36 @@ def test_export_to_html_using_custom_template(testdir):
 
     runner = CliRunner()
     result = runner.invoke(
-        cli.main, ["html", "test_file.json", "--template", "test_template.html"]
+        cli.main,
+        ["html", "test_file.json", "--output", "-", "--template", "test_template.html"],
     )
     assert result.exit_code == 0, result.stdout
     assert result.stdout == "hello world"
 
 
+@pytest.mark.parametrize(
+    "additional_args, expected_save_path",
+    ((["-o", "/somewhere/html"], "/somewhere/html"), ([], cli.DEFAULT_HTML_SAVE_PATH)),
+)
+@mock.patch.object(cli, "entries_to_html")
+@mock.patch.object(cli, "_write_html_to_file")
+def test_export_to_html_into_file(
+    mocked_write, mocked_entries_to_html, testdir, additional_args, expected_save_path
+):
+    testdir.makefile(".json", test_file="{}")
+    runner = CliRunner()
+
+    mocked_entries_to_html.return_value = "hi!"
+
+    args = ["html", "test_file.json"] + additional_args
+    result = runner.invoke(cli.main, args)
+
+    assert result.exit_code == 0, result.stdout
+    assert not result.stdout
+
+    mocked_write.assert_called_once_with("hi!", expected_save_path)
+
+
 def test_export_to_html_using_invalid_custom_template_should_fail(testdir):
     testdir.makefile(".json", test_file="{}")
     testdir.makefile(".html", test_template='{% "hello world" %}')
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index c7c53a0..0569748 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -1,5 +1,7 @@
 import json
 
+import mock
+
 DUMMY_TEST_QUERY = """
 import pytest
 
@@ -25,7 +27,7 @@ def test_fixture_is_invoked_when_marked(testdir):
 
     # Run a dummy test that performs queries
     # and triggers a counting of the query number
-    testdir.makepyfile(DUMMY_TEST_QUERY)
+    testdir.makepyfile(test_file=DUMMY_TEST_QUERY)
     results = testdir.runpytest("--django-db-bench", results_path)
 
     # Ensure the tests have passed
@@ -34,9 +36,7 @@ def test_fixture_is_invoked_when_marked(testdir):
     # Ensure the results file was created
     assert results_path.check()
     assert json.load(results_path) == {
-        "test_fixture_is_invoked_when_marked": {
-            "test_count_db_query_number": {"query-count": 2}
-        }
+        "test_file": {"test_count_db_query_number": {"query-count": 2}}
     }
 
 
@@ -94,6 +94,94 @@ def test_failure():
     }
 
 
+def test_fixture_is_backing_up_old_results(testdir):
+    """Ensure old results are backed up when the backup option is passed."""
+    results_path = testdir.tmpdir.join("results.json")
+    old_results_path = testdir.tmpdir.join("results.old.json")
+
+    # Run a dummy test that performs queries
+    # and triggers a counting of the query number
+    testdir.makepyfile(test_file=DUMMY_TEST_QUERY)
+
+    results = testdir.runpytest(
+        "--django-db-bench", results_path, "--django-backup-queries", old_results_path
+    )
+
+    # Ensure the tests have passed
+    results.assert_outcomes(1, 0, 0)
+
+    # Ensure the results file was created
+    assert results_path.check()
+    assert (
+        not old_results_path.check()
+    ), "Nothing should have been backed up--there was nothing to back up"
+
+    # Create another test to generate more results,
+    # to ensure the backup results were actually the previous ones
+    testdir.makepyfile(test_otherfile=DUMMY_TEST_QUERY)
+
+    # Run the tests again
+    results = testdir.runpytest(
+        "--django-db-bench", results_path, "--django-backup-queries", old_results_path
+    )
+
+    # Ensure the tests have passed
+    results.assert_outcomes(2, 0, 0)
+
+    # Ensure the results file was created
+    assert results_path.check()
+    assert old_results_path.check(), "The backup file should have been created"
+
+    # Check contents
+    assert json.load(results_path) == {
+        "test_file": {"test_count_db_query_number": {"query-count": 2}},
+        "test_otherfile": {"test_count_db_query_number": {"query-count": 2}},
+    }
+    assert json.load(old_results_path) == {
+        "test_file": {"test_count_db_query_number": {"query-count": 2}}
+    }
+
+
+def test_fixture_is_not_backing_up_if_not_asked_to(testdir):
+    """Ensure old results are not backed up when the backup option is not passed."""
+    results_path = testdir.tmpdir.join("results.json")
+    results_path.ensure(file=True)  # 'touch' the file
+
+    # Run a dummy test that performs queries
+    # and triggers a counting of the query number
+    testdir.makepyfile(test_file=DUMMY_TEST_QUERY)
+
+    with mock.patch("pytest_django_queries.plugin._create_backup") as mocked_backup:
+        results = testdir.runpytest("--django-db-bench", results_path)
+        assert mocked_backup.call_count == 0
+
+    # Ensure the tests have passed
+    results.assert_outcomes(1, 0, 0)
+    assert results_path.check()
+
+
+def test_fixture_is_backing_up_old_results_to_default_path_if_no_path_provided(testdir):
+    """Ensure old results are backed up to the default path when no path is provided."""
+    results_path = testdir.tmpdir.join("results.json")
+    results_path.ensure(file=True)  # 'touch' the file
+
+    # Run a dummy test that performs queries
+    # and triggers a counting of the query number
+    testdir.makepyfile(test_file=DUMMY_TEST_QUERY)
+
+    with mock.patch("pytest_django_queries.plugin._create_backup") as mocked_backup:
+        from pytest_django_queries.plugin import DEFAULT_OLD_RESULT_FILENAME
+
+        results = testdir.runpytest(
+            "--django-db-bench", results_path, "--django-backup-queries"
+        )
+        mocked_backup.assert_called_with(str(results_path), DEFAULT_OLD_RESULT_FILENAME)
+
+    # Ensure the tests have passed
+    results.assert_outcomes(1, 0, 0)
+    assert results_path.check()
+
+
 def test_marker_message(testdir):
     """Ensure the custom markers configuration is added to pytest."""
     result = testdir.runpytest("--markers")
@@ -114,5 +202,8 @@ def test_implements_custom_options(testdir):
             "*--django-db-bench=PATH",
             "*Output file for storing the results. Default: .pytest-",
             "*queries",
+            "*--django-backup-queries=[[]PATH[]]",
+            "*Whether the old results should be backed up or not",
+            "*before overriding",
         ]
     )
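For reference, here is a minimal sketch of the workflow these changes enable, based only on the flags and defaults visible in this diff (`--django-db-bench`, `--django-backup-queries`, and `django-queries html -o`); the file names used below are illustrative, not prescribed by the plugin:

```shell
# First run: collect query counts into the results file (.pytest-queries by default)
pytest

# Later run: back up the previous results before they are overwritten.
# Passing --django-backup-queries with no value falls back to the plugin's
# default backup path; an explicit path (illustrative here) can also be given.
pytest --django-backup-queries .pytest-queries.backup

# Compare the backed-up results against the new ones
django-queries diff

# Export the latest results as HTML (django-queries-results.html by default);
# pass a dash (-o -) to print the report to stdout instead
django-queries html -o report.html
```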