Skip to content

Commit

Permalink
Address review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
siddardh committed Jun 20, 2023
1 parent dc506c5 commit c9242a9
Show file tree
Hide file tree
Showing 7 changed files with 49 additions and 45 deletions.
12 changes: 6 additions & 6 deletions lib/pbench/server/api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,12 +134,6 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
endpoint="key",
resource_class_args=(config,),
)
api.add_resource(
Visualize,
f"{base_uri}/visualize/<string:dataset>",
endpoint="visualize",
resource_class_args=(config,),
)
api.add_resource(
ServerAudit,
f"{base_uri}/server/audit",
Expand All @@ -166,6 +160,12 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
endpoint="upload",
resource_class_args=(config,),
)
api.add_resource(
Visualize,
f"{base_uri}/datasets/<string:dataset>/visualize",
endpoint="visualize",
resource_class_args=(config,),
)


def get_server_config() -> PbenchServerConfig:
Expand Down
38 changes: 19 additions & 19 deletions lib/pbench/server/api/resources/visualize.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@

class Visualize(ApiBase):
"""
API class to retrieve data using Quisby to visualize
This class implements the Server API used to retrieve data for visualization.
"""

def __init__(self, config: PbenchServerConfig):
Expand Down Expand Up @@ -63,36 +63,36 @@ def _get(
"""

dataset = params.uri["dataset"]

cache_m = CacheManager(self.config, current_app.logger)

try:
tarball = cache_m.find_dataset(dataset.resource_id)
except TarballNotFound as e:
raise APIAbort(HTTPStatus.NOT_FOUND, str(e))
raise APIAbort(
HTTPStatus.NOT_FOUND, f"No dataset with ID '{e.tarball}' found"
) from e

metadata = self._get_dataset_metadata(
dataset, ["dataset.metalog.pbench.script"]
)
benchmark = metadata["dataset.metalog.pbench.script"].upper()
benchmark_type = BenchmarkName.__members__.get(benchmark)
if not benchmark_type:
raise APIAbort(HTTPStatus.UNSUPPORTED_MEDIA_TYPE, "Unsupported Benchmark")

name = Dataset.stem(tarball.tarball_path)
try:
file = tarball.extract(tarball.tarball_path, f"{name}/result.csv")
except TarballUnpackError as e:
raise APIInternalError(str(e)) from e

metadata = self._get_dataset_metadata(
dataset, ["dataset.metalog.pbench.script"]
)
benchmark = metadata["dataset.metalog.pbench.script"]
if benchmark == "uperf":
benchmark_type = BenchmarkName.UPERF
else:
raise APIAbort(HTTPStatus.UNSUPPORTED_MEDIA_TYPE, "Unsupported Benchmark")

get_quisby_data = QuisbyProcessing.extract_data(
self, benchmark_type, dataset.name, InputType.STREAM, file
pquisby_obj = QuisbyProcessing()
get_quisby_data = pquisby_obj.extract_data(
benchmark_type, dataset.name, InputType.STREAM, file
)

if get_quisby_data["status"] == "success":
return jsonify(get_quisby_data)

else:
if get_quisby_data["status"] != "success":
raise APIInternalError(
f"Quisby processing failure. Exception: {get_quisby_data['exception']}"
)
) from None
return jsonify(get_quisby_data)
2 changes: 1 addition & 1 deletion lib/pbench/server/cache_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def __init__(self, tarball: str):
self.tarball = tarball

def __str__(self) -> str:
return f"The dataset tarball named {self.tarball!r} not found"
return f"The dataset tarball named {self.tarball!r} is not found"


class DuplicateTarball(CacheManagerError):
Expand Down
4 changes: 2 additions & 2 deletions lib/pbench/test/unit/server/test_cache_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -924,7 +924,7 @@ def mock_run(args, **kwargs):
assert tarball == cm[md5]
with pytest.raises(TarballNotFound) as exc:
cm["foobar"]
assert str(exc.value) == "The dataset tarball named 'foobar' not found"
assert str(exc.value) == "The dataset tarball named 'foobar' is not found"

# Test __contains__
assert md5 in cm
Expand All @@ -943,7 +943,7 @@ def mock_run(args, **kwargs):
# Try to find a dataset that doesn't exist
with pytest.raises(TarballNotFound) as exc:
cm.find_dataset("foobar")
assert str(exc.value) == "The dataset tarball named 'foobar' not found"
assert str(exc.value) == "The dataset tarball named 'foobar' is not found"
assert exc.value.tarball == "foobar"

# Unpack the dataset, creating INCOMING and RESULTS links
Expand Down
2 changes: 1 addition & 1 deletion lib/pbench/test/unit/server/test_datasets_inventory.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def test_get_no_dataset(self, query_get_as):
def test_dataset_not_present(self, query_get_as):
response = query_get_as("fio_2", "metadata.log", HTTPStatus.NOT_FOUND)
assert response.json == {
"message": "The dataset tarball named 'random_md5_string4' not found"
"message": "The dataset tarball named 'random_md5_string4' is not found"
}

def test_unauthorized_access(self, query_get_as):
Expand Down
8 changes: 4 additions & 4 deletions lib/pbench/test/unit/server/test_endpoint_configure.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,10 +106,6 @@ def check_config(self, client, server_config, host, my_headers={}):
"template": f"{uri}/key/{{key}}",
"params": {"key": {"type": "string"}},
},
"visualize": {
"template": f"{uri}/visualize/{{dataset}}",
"params": {"dataset": {"type": "string"}},
},
"relay": {
"template": f"{uri}/relay/{{uri}}",
"params": {"uri": {"type": "path"}},
Expand All @@ -123,6 +119,10 @@ def check_config(self, client, server_config, host, my_headers={}):
"template": f"{uri}/upload/{{filename}}",
"params": {"filename": {"type": "string"}},
},
"visualize": {
"template": f"{uri}/datasets/{{dataset}}/visualize",
"params": {"dataset": {"type": "string"}},
},
},
}

Expand Down
28 changes: 16 additions & 12 deletions lib/pbench/test/unit/server/test_visualize.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import requests

from pbench.server.api.resources import ApiBase
from pbench.server.cache_manager import CacheManager
from pbench.server.cache_manager import CacheManager, Tarball
from pbench.server.database.models.datasets import Dataset, DatasetNotFound


Expand All @@ -33,34 +33,37 @@ def query_api(
dataset_id = dataset # Allow passing deliberately bad value
headers = {"authorization": f"bearer {get_token_func(user)}"}
response = client.get(
f"{server_config.rest_uri}/visualize/{dataset_id}",
f"{server_config.rest_uri}/datasets/{dataset_id}/visualize",
headers=headers,
)
assert response.status_code == expected_status
return response

return query_api

def mock_find_dataset(self, dataset):
def mock_find_dataset(self, _dataset: str) -> Tarball:
class Tarball(object):
tarball_path = Path("/dataset/tarball.tar.xz")

def extract(tarball_path, path):
def extract(_tarball_path: Path, _path: str) -> str:
return "CSV_file_as_a_byte_stream"

return Tarball

def mock_get_dataset_metadata(self, dataset, key):
def mock_get_dataset_metadata(self, _dataset, _key):
return {"dataset.metalog.pbench.script": "uperf"}

def mock_extract_data(self, test_name, dataset_name, input_type, data):
return {"status": "success", "json_data": "quisby_data"}

def test_get_no_dataset(self, query_get_as):
response = query_get_as("nonexistent-dataset", "drb", HTTPStatus.NOT_FOUND)
assert response.json == {"message": "Dataset 'nonexistent-dataset' not found"}

def test_dataset_not_present(self, query_get_as):
response = query_get_as("fio_2", "drb", HTTPStatus.NOT_FOUND)
assert response.json == {
"message": "The dataset tarball named 'random_md5_string4' not found"
"message": "No dataset with ID 'random_md5_string4' found"
}

def test_unauthorized_access(self, query_get_as):
Expand All @@ -70,14 +73,12 @@ def test_unauthorized_access(self, query_get_as):
}

def test_successful_get(self, query_get_as, monkeypatch):
def mock_extract_data(self, test_name, dataset_name, input_type, data):
return {"status": "success", "json_data": "quisby_data"}

monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
monkeypatch.setattr(
ApiBase, "_get_dataset_metadata", self.mock_get_dataset_metadata
)
monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)

response = query_get_as("uperf_1", "test", HTTPStatus.OK)
assert response.json["status"] == "success"
Expand All @@ -104,13 +105,16 @@ def mock_extract_data(self, test_name, dataset_name, input_type, data):
)
monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
response = query_get_as("uperf_1", "test", HTTPStatus.INTERNAL_SERVER_ERROR)
response.json["message"] = "Unexpected failure from Quisby"
assert response.json.get("message").startswith(
"Internal Pbench Server Error: log reference "
)

def test_unsupported_benchmark(self, query_get_as, monkeypatch):
def mock_get_metadata(self, dataset, key):
return {"dataset.metalog.pbench.script": "linpack"}
return {"dataset.metalog.pbench.script": "hammerDB"}

monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
monkeypatch.setattr(ApiBase, "_get_dataset_metadata", mock_get_metadata)
monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)
response = query_get_as("uperf_1", "test", HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
response.json["message"] = "Unsupported Benchmark"
response.json["message"] == "Unsupported Benchmark"

0 comments on commit c9242a9

Please sign in to comment.