diff --git a/lib/pbench/server/api/__init__.py b/lib/pbench/server/api/__init__.py
index 24ca328781..d9e15b5ea6 100644
--- a/lib/pbench/server/api/__init__.py
+++ b/lib/pbench/server/api/__init__.py
@@ -134,12 +134,6 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
         endpoint="key",
         resource_class_args=(config,),
     )
-    api.add_resource(
-        Visualize,
-        f"{base_uri}/visualize/<string:dataset>",
-        endpoint="visualize",
-        resource_class_args=(config,),
-    )
     api.add_resource(
         ServerAudit,
         f"{base_uri}/server/audit",
@@ -166,6 +160,12 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
         endpoint="upload",
         resource_class_args=(config,),
     )
+    api.add_resource(
+        Visualize,
+        f"{base_uri}/datasets/<string:dataset>/visualize",
+        endpoint="visualize",
+        resource_class_args=(config,),
+    )
 
 
 def get_server_config() -> PbenchServerConfig:
diff --git a/lib/pbench/server/api/resources/visualize.py b/lib/pbench/server/api/resources/visualize.py
index 33569830f2..648bcf1459 100644
--- a/lib/pbench/server/api/resources/visualize.py
+++ b/lib/pbench/server/api/resources/visualize.py
@@ -29,7 +29,7 @@
 
 class Visualize(ApiBase):
     """
-    API class to retrieve data using Quisby to visualize
+    This class implements the Server API used to retrieve data for visualization.
     """
 
     def __init__(self, config: PbenchServerConfig):
@@ -63,12 +63,22 @@ def _get(
         """
 
         dataset = params.uri["dataset"]
-
         cache_m = CacheManager(self.config, current_app.logger)
+
         try:
             tarball = cache_m.find_dataset(dataset.resource_id)
         except TarballNotFound as e:
-            raise APIAbort(HTTPStatus.NOT_FOUND, str(e))
+            raise APIAbort(
+                HTTPStatus.NOT_FOUND, f"No dataset with ID '{e.tarball}' found"
+            ) from e
+
+        metadata = self._get_dataset_metadata(
+            dataset, ["dataset.metalog.pbench.script"]
+        )
+        benchmark = metadata["dataset.metalog.pbench.script"].upper()
+        benchmark_type = BenchmarkName.__members__.get(benchmark)
+        if not benchmark_type:
+            raise APIAbort(HTTPStatus.UNSUPPORTED_MEDIA_TYPE, "Unsupported Benchmark")
 
         name = Dataset.stem(tarball.tarball_path)
         try:
@@ -76,23 +86,13 @@
         except TarballUnpackError as e:
             raise APIInternalError(str(e)) from e
 
-        metadata = self._get_dataset_metadata(
-            dataset, ["dataset.metalog.pbench.script"]
-        )
-        benchmark = metadata["dataset.metalog.pbench.script"]
-        if benchmark == "uperf":
-            benchmark_type = BenchmarkName.UPERF
-        else:
-            raise APIAbort(HTTPStatus.UNSUPPORTED_MEDIA_TYPE, "Unsupported Benchmark")
-
-        get_quisby_data = QuisbyProcessing.extract_data(
-            self, benchmark_type, dataset.name, InputType.STREAM, file
+        pquisby_obj = QuisbyProcessing()
+        get_quisby_data = pquisby_obj.extract_data(
+            benchmark_type, dataset.name, InputType.STREAM, file
         )
 
-        if get_quisby_data["status"] == "success":
-            return jsonify(get_quisby_data)
-
-        else:
+        if get_quisby_data["status"] != "success":
             raise APIInternalError(
                 f"Quisby processing failure. Exception: {get_quisby_data['exception']}"
-            )
+            ) from None
+        return jsonify(get_quisby_data)
diff --git a/lib/pbench/server/cache_manager.py b/lib/pbench/server/cache_manager.py
index caaacd0caa..d594a49efd 100644
--- a/lib/pbench/server/cache_manager.py
+++ b/lib/pbench/server/cache_manager.py
@@ -49,7 +49,7 @@ def __init__(self, tarball: str):
         self.tarball = tarball
 
     def __str__(self) -> str:
-        return f"The dataset tarball named {self.tarball!r} not found"
+        return f"The dataset tarball named {self.tarball!r} is not found"
 
 
 class DuplicateTarball(CacheManagerError):
diff --git a/lib/pbench/test/unit/server/test_cache_manager.py b/lib/pbench/test/unit/server/test_cache_manager.py
index 791d6ce7e9..93333e1ea6 100644
--- a/lib/pbench/test/unit/server/test_cache_manager.py
+++ b/lib/pbench/test/unit/server/test_cache_manager.py
@@ -924,7 +924,7 @@ def mock_run(args, **kwargs):
         assert tarball == cm[md5]
         with pytest.raises(TarballNotFound) as exc:
             cm["foobar"]
-        assert str(exc.value) == "The dataset tarball named 'foobar' not found"
+        assert str(exc.value) == "The dataset tarball named 'foobar' is not found"
 
         # Test __contains__
         assert md5 in cm
@@ -943,7 +943,7 @@ def mock_run(args, **kwargs):
         # Try to find a dataset that doesn't exist
         with pytest.raises(TarballNotFound) as exc:
             cm.find_dataset("foobar")
-        assert str(exc.value) == "The dataset tarball named 'foobar' not found"
+        assert str(exc.value) == "The dataset tarball named 'foobar' is not found"
         assert exc.value.tarball == "foobar"
 
         # Unpack the dataset, creating INCOMING and RESULTS links
diff --git a/lib/pbench/test/unit/server/test_datasets_inventory.py b/lib/pbench/test/unit/server/test_datasets_inventory.py
index 947ec9dda5..8f046b2c0d 100644
--- a/lib/pbench/test/unit/server/test_datasets_inventory.py
+++ b/lib/pbench/test/unit/server/test_datasets_inventory.py
@@ -59,7 +59,7 @@ def test_get_no_dataset(self, query_get_as):
     def test_dataset_not_present(self, query_get_as):
         response = query_get_as("fio_2", "metadata.log", HTTPStatus.NOT_FOUND)
         assert response.json == {
-            "message": "The dataset tarball named 'random_md5_string4' not found"
+            "message": "The dataset tarball named 'random_md5_string4' is not found"
         }
 
     def test_unauthorized_access(self, query_get_as):
diff --git a/lib/pbench/test/unit/server/test_endpoint_configure.py b/lib/pbench/test/unit/server/test_endpoint_configure.py
index e2fdbbeafb..9b2b0bea27 100644
--- a/lib/pbench/test/unit/server/test_endpoint_configure.py
+++ b/lib/pbench/test/unit/server/test_endpoint_configure.py
@@ -106,10 +106,6 @@ def check_config(self, client, server_config, host, my_headers={}):
                     "template": f"{uri}/key/{{key}}",
                     "params": {"key": {"type": "string"}},
                 },
-                "visualize": {
-                    "template": f"{uri}/visualize/{{dataset}}",
-                    "params": {"dataset": {"type": "string"}},
-                },
                 "relay": {
                     "template": f"{uri}/relay/{{uri}}",
                     "params": {"uri": {"type": "path"}},
@@ -123,6 +119,10 @@ def check_config(self, client, server_config, host, my_headers={}):
                     "template": f"{uri}/upload/{{filename}}",
                     "params": {"filename": {"type": "string"}},
                 },
+                "visualize": {
+                    "template": f"{uri}/datasets/{{dataset}}/visualize",
+                    "params": {"dataset": {"type": "string"}},
+                },
             },
         }
 
diff --git a/lib/pbench/test/unit/server/test_visualize.py b/lib/pbench/test/unit/server/test_visualize.py
index 3005150218..0fce225d98 100644
--- a/lib/pbench/test/unit/server/test_visualize.py
+++ b/lib/pbench/test/unit/server/test_visualize.py
@@ -6,7 +6,7 @@
 import requests
 
 from pbench.server.api.resources import ApiBase
-from pbench.server.cache_manager import CacheManager
+from pbench.server.cache_manager import CacheManager, Tarball
 from pbench.server.database.models.datasets import Dataset, DatasetNotFound
 
 
@@ -33,7 +33,7 @@ def query_api(
                 dataset_id = dataset  # Allow passing deliberately bad value
             headers = {"authorization": f"bearer {get_token_func(user)}"}
             response = client.get(
-                f"{server_config.rest_uri}/visualize/{dataset_id}",
+                f"{server_config.rest_uri}/datasets/{dataset_id}/visualize",
                 headers=headers,
             )
             assert response.status_code == expected_status
@@ -41,18 +41,21 @@ def query_api(
 
         return query_api
 
-    def mock_find_dataset(self, dataset):
+    def mock_find_dataset(self, _dataset: str) -> Tarball:
         class Tarball(object):
             tarball_path = Path("/dataset/tarball.tar.xz")
 
-            def extract(tarball_path, path):
+            def extract(_tarball_path: Path, _path: str) -> str:
                 return "CSV_file_as_a_byte_stream"
 
         return Tarball
 
-    def mock_get_dataset_metadata(self, dataset, key):
+    def mock_get_dataset_metadata(self, _dataset, _key):
         return {"dataset.metalog.pbench.script": "uperf"}
 
+    def mock_extract_data(self, test_name, dataset_name, input_type, data):
+        return {"status": "success", "json_data": "quisby_data"}
+
     def test_get_no_dataset(self, query_get_as):
         response = query_get_as("nonexistent-dataset", "drb", HTTPStatus.NOT_FOUND)
         assert response.json == {"message": "Dataset 'nonexistent-dataset' not found"}
@@ -60,7 +63,7 @@ def test_get_no_dataset(self, query_get_as):
     def test_dataset_not_present(self, query_get_as):
         response = query_get_as("fio_2", "drb", HTTPStatus.NOT_FOUND)
         assert response.json == {
-            "message": "The dataset tarball named 'random_md5_string4' not found"
+            "message": "No dataset with ID 'random_md5_string4' found"
         }
 
     def test_unauthorized_access(self, query_get_as):
@@ -70,14 +73,12 @@
         }
 
     def test_successful_get(self, query_get_as, monkeypatch):
-        def mock_extract_data(self, test_name, dataset_name, input_type, data):
-            return {"status": "success", "json_data": "quisby_data"}
 
         monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
         monkeypatch.setattr(
             ApiBase, "_get_dataset_metadata", self.mock_get_dataset_metadata
         )
-        monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
+        monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)
         response = query_get_as("uperf_1", "test", HTTPStatus.OK)
         assert response.json["status"] == "success"
 
@@ -104,13 +105,16 @@ def mock_extract_data(self, test_name, dataset_name, input_type, data):
         )
         monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
         response = query_get_as("uperf_1", "test", HTTPStatus.INTERNAL_SERVER_ERROR)
-        response.json["message"] = "Unexpected failure from Quisby"
+        assert response.json.get("message").startswith(
+            "Internal Pbench Server Error: log reference "
+        )
 
     def test_unsupported_benchmark(self, query_get_as, monkeypatch):
         def mock_get_metadata(self, dataset, key):
-            return {"dataset.metalog.pbench.script": "linpack"}
+            return {"dataset.metalog.pbench.script": "hammerDB"}
 
         monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
         monkeypatch.setattr(ApiBase, "_get_dataset_metadata", mock_get_metadata)
+        monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)
         response = query_get_as("uperf_1", "test", HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
-        response.json["message"] = "Unsupported Benchmark"
+        assert response.json["message"] == "Unsupported Benchmark"
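
Note on the benchmark lookup added to visualize.py above: the hard-coded check for "uperf" is replaced by a lookup against pquisby's BenchmarkName enum, so whatever benchmarks that enum defines are accepted without further API changes. A minimal sketch of the pattern follows, using an illustrative stand-in enum; only UPERF is confirmed by the code removed in this patch, and the real members come from pquisby.lib.post_processing.BenchmarkName.

    from enum import Enum


    class BenchmarkName(Enum):
        # Illustrative stand-in: the real enum is provided by pquisby, and its
        # membership defines which benchmarks the endpoint accepts.
        UPERF = "uperf"


    def lookup(script: str):
        # Mirrors the new _get() logic: normalize the metalog script name and
        # consult the enum's __members__ mapping instead of hard-coding names.
        return BenchmarkName.__members__.get(script.upper())


    assert lookup("uperf") is BenchmarkName.UPERF
    assert lookup("hammerDB") is None  # falls through to UNSUPPORTED_MEDIA_TYPE

Because the check is table-driven, supporting another benchmark becomes a pquisby-side change rather than an API change, which is also why the unsupported-benchmark test only needs a script name that is absent from the enum.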