tests: improve performance of tests by caching repos
Panaetius authored and Ralf Grubenmann committed May 26, 2023
1 parent e02e5bf commit f9329d2
Showing 145 changed files with 441 additions and 305 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/test_deploy.yml
@@ -14,7 +14,8 @@ on:
   branches:
     - "**"
     - "!master"
-
+env:
+  RENKU_TEST_RECREATE_CACHE: "${{ (endsWith(github.ref, 'master') || endsWith(github.ref, 'develop') || startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/release/' ) ) && '1' || '0' }}"
 jobs:
   set-matrix:
     runs-on: ubuntu-latest
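
Note: the expression above uses GitHub Actions' "cond && 'x' || 'y'" idiom as a ternary, so RENKU_TEST_RECREATE_CACHE is "1" only on master, develop, release branches, and tags, presumably so those long-lived refs rebuild the test caches instead of reusing stale ones. A minimal sketch of how test code could consume the flag follows; the helper name is an assumption, since the consuming fixture is among the files not rendered on this page.

    # Sketch under stated assumptions; not the actual Renku helper.
    import os

    def should_recreate_cache() -> bool:
        """True on master/develop/release/tag builds, where caches are rebuilt."""
        return os.environ.get("RENKU_TEST_RECREATE_CACHE", "0") == "1"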
2 changes: 2 additions & 0 deletions conftest.py
@@ -79,10 +79,12 @@ def pytest_configure(config):
     os.environ["RENKU_DISABLE_VERSION_CHECK"] = "1"
     # NOTE: Set an env var during during tests to mark that Renku is running in a test session.
     os.environ["RENKU_RUNNING_UNDER_TEST"] = "1"
+    os.environ["RENKU_SKIP_HOOK_CHECKS"] = "1"
 
 
 def pytest_unconfigure(config):
     """Hook that is called by pytest after all tests are executed."""
     os.environ.pop("RENKU_SKIP_MIN_VERSION_CHECK", None)
     os.environ.pop("RENKU_DISABLE_VERSION_CHECK", None)
     os.environ.pop("RENKU_RUNNING_UNDER_TEST", None)
+    os.environ.pop("RENKU_SKIP_HOOK_CHECKS", None)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -244,7 +244,7 @@ pattern = """(?x) (?# ignore whitespace
 """
 
 [tool.pytest.ini_options]
-addopts = "--flake8 --black --doctest-glob=\"*.rst\" --doctest-modules --cov --cov-report=term-missing --ignore=docs/cheatsheet/"
+addopts = "--doctest-glob=\"*.rst\" --doctest-modules --cov --cov-report=term-missing --ignore=docs/cheatsheet/"
 doctest_optionflags = "ALLOW_UNICODE"
 flake8-ignore = ["*.py", "E121", "E126", "E203", "E226", "E231", "W503", "W504", "docs/conf.py", "docs/cheatsheet/conf.py", "ALL"]
 flake8-max-line-length = 120
4 changes: 4 additions & 0 deletions renku/data/pre-commit.sh
@@ -19,6 +19,10 @@
 # RENKU HOOK. DO NOT REMOVE OR MODIFY.
 ######################################
 
+if [ "$RENKU_SKIP_HOOK_CHECKS" == "1" ]; then
+  exit 0
+fi
+
 # Find all modified or added files, and do nothing if there aren't any.
 export RENKU_DISABLE_VERSION_CHECK=true
 IFS=$'\n' read -r -d '' -a MODIFIED_FILES \
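
Together with the conftest.py change above, this guard short-circuits the pre-commit hook for the whole suite: pytest_configure exports RENKU_SKIP_HOOK_CHECKS=1, every child process the tests spawn (including git commit, which runs the hook) inherits that environment, and the hook exits before doing any work. The snippet below is illustrative, not project code; it only demonstrates the environment inheritance the mechanism relies on.

    import os
    import subprocess

    os.environ["RENKU_SKIP_HOOK_CHECKS"] = "1"
    # Subprocesses -- including Git and the hooks it spawns -- see the flag.
    result = subprocess.run(
        ["sh", "-c", 'echo "hook sees: $RENKU_SKIP_HOOK_CHECKS"'],
        capture_output=True,
        text=True,
    )
    print(result.stdout.strip())  # hook sees: 1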
29 changes: 16 additions & 13 deletions tests/cli/fixtures/cli_workflow.py
@@ -19,19 +19,22 @@
 
 
 @pytest.fixture
-def workflow_graph(run_shell, project):
+def workflow_graph(run_shell, project, cache_test_project):
     """Setup a project with a workflow graph."""
+    cache_test_project.set_name("workflow_graph_fixture")
+    if not cache_test_project.setup():
 
-    def _run_workflow(name, command, extra_args=""):
-        output = run_shell(f"renku run --name {name} {extra_args} -- {command}")
-        # Assert not allocated stderr.
-        assert output[1] is None
+        def _run_workflow(name, command, extra_args=""):
+            output = run_shell(f"renku run --name {name} {extra_args} -- {command}")
+            # Assert not allocated stderr.
+            assert output[1] is None
 
-    _run_workflow("r1", "echo 'test' > A")
-    _run_workflow("r2", "tee B C < A")
-    _run_workflow("r3", "cp A Z")
-    _run_workflow("r4", "cp B X")
-    _run_workflow("r5", "cat C Z > Y")
-    _run_workflow("r6", "bash -c 'cat X Y | tee R S'", extra_args="--input X --input Y --output R --output S")
-    _run_workflow("r7", "echo 'other' > H")
-    _run_workflow("r8", "tee I J < H")
+        _run_workflow("r1", "echo 'test' > A")
+        _run_workflow("r2", "tee B C < A")
+        _run_workflow("r3", "cp A Z")
+        _run_workflow("r4", "cp B X")
+        _run_workflow("r5", "cat C Z > Y")
+        _run_workflow("r6", "bash -c 'cat X Y | tee R S'", extra_args="--input X --input Y --output R --output S")
+        _run_workflow("r7", "echo 'other' > H")
+        _run_workflow("r8", "tee I J < H")
+        cache_test_project.save()
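
This fixture shows the caching pattern the commit applies throughout the suite: cache_test_project.setup() returns True when a previously saved repository state could be restored, so the expensive renku run calls execute only once per cache key. The fixture's implementation is in one of the files not rendered on this page; the sketch below only illustrates the interface the tests rely on (set_name/setup/save), and the tar-archive mechanism is an assumption.

    # Illustrative sketch; details (archives, cache layout) are assumptions.
    import shutil
    from pathlib import Path

    class CachedTestProject:
        def __init__(self, project_path: Path, cache_dir: Path) -> None:
            self.project_path = project_path
            self.cache_dir = cache_dir
            self.name = "default"

        def set_name(self, name: str) -> None:
            """Key the cache per fixture/test so different setups don't collide."""
            self.name = name

        @property
        def _archive(self) -> Path:
            return self.cache_dir / f"{self.name}.tar"

        def setup(self) -> bool:
            """Restore a cached repository; return True when a cache was found."""
            if not self._archive.exists():
                return False
            shutil.rmtree(self.project_path, ignore_errors=True)
            shutil.unpack_archive(self._archive, self.project_path.parent)
            return True

        def save(self) -> None:
            """Archive the current repository state for later test runs."""
            shutil.make_archive(
                str(self._archive.with_suffix("")),  # make_archive appends .tar
                "tar",
                root_dir=self.project_path.parent,
                base_dir=self.project_path.name,
            )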
8 changes: 4 additions & 4 deletions tests/cli/test_datasets.py
@@ -1770,7 +1770,7 @@ def test_pull_data_from_lfs(runner, project, tmpdir, subdirectory, no_lfs_size_l
     assert 0 == result.exit_code, format_result_exception(result)
 
 
-def test_lfs_hook(project_with_injection, subdirectory, large_file):
+def test_lfs_hook(project_with_injection, subdirectory, large_file, enable_precommit_hook):
     """Test committing large files to Git."""
     filenames = {"large-file", "large file with whitespace", "large*file?with wildcards"}
 
@@ -1799,7 +1799,7 @@ def test_lfs_hook(project_with_injection, subdirectory, large_file):
 
 
 @pytest.mark.parametrize("use_env_var", [False, True])
-def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_var):
+def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_var, enable_precommit_hook):
     """Test committing large files to Git gets automatically added to lfs."""
     if use_env_var:
         os.environ["AUTOCOMMIT_LFS"] = "true"
@@ -1831,7 +1831,7 @@ def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_
     assert filenames == tracked_lfs_files
 
 
-def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file):
+def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file, enable_precommit_hook):
     """Test committing large files to Git."""
     result = runner.invoke(
         cli, ["--no-external-storage", "dataset", "add", "--copy", "-c", "my-dataset", str(large_file)]
@@ -1840,7 +1840,7 @@ def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file):
     assert "OK" in result.output
 
 
-def test_datadir_hook(runner, project, subdirectory):
+def test_datadir_hook(runner, project, subdirectory, enable_precommit_hook):
     """Test pre-commit hook fir checking datadir files."""
     set_value(section="renku", key="check_datadir_files", value="true", global_only=True)
 
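
The four tests above exercise the Git hooks directly, so each now requests enable_precommit_hook to opt back out of the suite-wide skip. That fixture is not part of this page's diff; presumably it clears RENKU_SKIP_HOOK_CHECKS for the duration of the test, along these lines:

    # Assumed shape of the fixture, not the actual implementation.
    import os
    import pytest

    @pytest.fixture
    def enable_precommit_hook():
        """Re-enable hook checks for tests that verify the hooks themselves."""
        os.environ.pop("RENKU_SKIP_HOOK_CHECKS", None)
        yield
        os.environ["RENKU_SKIP_HOOK_CHECKS"] = "1"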
58 changes: 33 additions & 25 deletions tests/cli/test_graph.py
@@ -27,14 +27,16 @@
 
 
 @pytest.mark.parametrize("revision", ["", "HEAD", "HEAD^", "HEAD^..HEAD"])
-def test_graph_export_validation(runner, project, directory_tree, run, revision):
+def test_graph_export_validation(runner, project, directory_tree, run, revision, cache_test_project):
     """Test graph validation when exporting."""
-    assert 0 == runner.invoke(cli, ["dataset", "add", "--copy", "-c", "my-data", str(directory_tree)]).exit_code
+    if not cache_test_project.setup():
+        assert 0 == runner.invoke(cli, ["dataset", "add", "--copy", "-c", "my-data", str(directory_tree)]).exit_code
 
-    file1 = project.path / DATA_DIR / "my-data" / directory_tree.name / "file1"
-    file2 = project.path / DATA_DIR / "my-data" / directory_tree.name / "dir1" / "file2"
-    assert 0 == run(["run", "head", str(file1)], stdout="out1")
-    assert 0 == run(["run", "tail", str(file2)], stdout="out2")
+        file1 = project.path / DATA_DIR / "my-data" / directory_tree.name / "file1"
+        file2 = project.path / DATA_DIR / "my-data" / directory_tree.name / "dir1" / "file2"
+        assert 0 == run(["run", "head", str(file1)], stdout="out1")
+        assert 0 == run(["run", "tail", str(file2)], stdout="out2")
+        cache_test_project.save()
 
     result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict", "--revision", revision])
 
@@ -57,12 +59,14 @@ def test_graph_export_validation(runner, project, directory_tree, run, revision)
 
 @pytest.mark.serial
 @pytest.mark.shelled
-def test_graph_export_strict_run(runner, project, run_shell):
+def test_graph_export_strict_run(runner, project, run_shell, cache_test_project):
     """Test graph export output of run command."""
-    # Run a shell command with pipe.
-    assert run_shell('renku run --name run1 echo "my input string" > my_output_file')[1] is None
-    assert run_shell("renku run --name run2 cp my_output_file my_output_file2")[1] is None
-    assert run_shell("renku workflow compose my-composite-plan run1 run2")[1] is None
+    if not cache_test_project.setup():
+        # Run a shell command with pipe.
+        assert run_shell('renku run --name run1 echo "my input string" > my_output_file')[1] is None
+        assert run_shell("renku run --name run2 cp my_output_file my_output_file2")[1] is None
+        assert run_shell("renku workflow compose my-composite-plan run1 run2")[1] is None
+        cache_test_project.save()
 
     # Assert created output file.
     result = runner.invoke(cli, ["graph", "export", "--full", "--strict", "--format=json-ld"])
@@ -80,21 +84,25 @@ def test_graph_export_strict_run(runner, project, run_shell):
     assert 0 == result.exit_code, format_result_exception(result)
 
 
-def test_graph_export_strict_dataset(tmpdir, runner, project, subdirectory):
+def test_graph_export_strict_dataset(tmpdir, runner, project, subdirectory, cache_test_project):
     """Test output of graph export for dataset add."""
-    result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
-    assert 0 == result.exit_code, format_result_exception(result)
-    paths = []
-    test_paths = []
-    for i in range(3):
-        new_file = tmpdir.join(f"file_{i}")
-        new_file.write(str(i))
-        paths.append(str(new_file))
-        test_paths.append(os.path.relpath(str(new_file), str(project.path)))
-
-    # add data
-    result = runner.invoke(cli, ["dataset", "add", "--copy", "my-dataset"] + paths)
-    assert 0 == result.exit_code, format_result_exception(result)
+    if not cache_test_project.setup():
+        result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
+        assert 0 == result.exit_code, format_result_exception(result)
+        paths = []
+        test_paths = []
+        for i in range(3):
+            new_file = tmpdir.join(f"file_{i}")
+            new_file.write(str(i))
+            paths.append(str(new_file))
+            test_paths.append(os.path.relpath(str(new_file), str(project.path)))
+
+        # add data
+        result = runner.invoke(cli, ["dataset", "add", "--copy", "my-dataset"] + paths)
+        assert 0 == result.exit_code, format_result_exception(result)
+        cache_test_project.save()
+    else:
+        test_paths = [f"../file_{i}" for i in range(3)]
 
     result = runner.invoke(cli, ["graph", "export", "--strict", "--format=json-ld", "--revision", "HEAD"])
     assert 0 == result.exit_code, format_result_exception(result)

[Diffs for the remaining 138 changed files are not shown on this page.]