
Commit 5854108

Next set of review comments
siddardh committed Jun 21, 2023
1 parent 84c308a commit 5854108
Showing 5 changed files with 37 additions and 29 deletions.
lib/pbench/client/__init__.py (2 changes: 1 addition & 1 deletion)

@@ -47,13 +47,13 @@ class API(Enum):
     DATASETS_NAMESPACE = "datasets_namespace"
     DATASETS_SEARCH = "datasets_search"
     DATASETS_VALUES = "datasets_values"
+    DATASETS_VISUALIZE = "datasets_visualize"
     ENDPOINTS = "endpoints"
     KEY = "key"
     RELAY = "relay"
     SERVER_AUDIT = "server_audit"
     SERVER_SETTINGS = "server_settings"
     UPLOAD = "upload"
-    VISUALIZE = "visualize"


 class PbenchServerClient:
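
The renamed API.DATASETS_VISUALIZE member keeps the client-side enum aligned with the server's new endpoint name. As a minimal sketch of how a functional test might exercise it (the connect()/get() calls and their arguments are assumptions for illustration, not part of this commit):

    from pbench.client import API, PbenchServerClient

    # Hypothetical usage; host and authentication setup are assumed.
    client = PbenchServerClient("pbench.example.com")
    client.connect()
    response = client.get(API.DATASETS_VISUALIZE, {"dataset": "uperf_1"})
    assert response.json()["status"] == "success"
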
lib/pbench/server/api/__init__.py (14 changes: 7 additions & 7 deletions)

@@ -17,6 +17,7 @@
 from pbench.server.api.resources.datasets_inventory import DatasetsInventory
 from pbench.server.api.resources.datasets_list import DatasetsList
 from pbench.server.api.resources.datasets_metadata import DatasetsMetadata
+from pbench.server.api.resources.datasets_visualize import DatasetsVisualize
 from pbench.server.api.resources.endpoint_configure import EndpointConfig
 from pbench.server.api.resources.query_apis.dataset import Datasets
 from pbench.server.api.resources.query_apis.datasets.datasets_contents import (
@@ -37,7 +38,6 @@
 from pbench.server.api.resources.server_audit import ServerAudit
 from pbench.server.api.resources.server_settings import ServerSettings
 from pbench.server.api.resources.upload import Upload
-from pbench.server.api.resources.visualize import Visualize
 import pbench.server.auth.auth as Auth
 from pbench.server.database import init_db
 from pbench.server.database.database import Database
@@ -120,6 +120,12 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
         endpoint="datasets_search",
         resource_class_args=(config,),
     )
+    api.add_resource(
+        DatasetsVisualize,
+        f"{base_uri}/datasets/<string:dataset>/visualize",
+        endpoint="datasets_visualize",
+        resource_class_args=(config,),
+    )
     api.add_resource(
         EndpointConfig,
         f"{base_uri}/endpoints",
@@ -160,12 +166,6 @@ def register_endpoints(api: Api, app: Flask, config: PbenchServerConfig):
         endpoint="upload",
         resource_class_args=(config,),
     )
-    api.add_resource(
-        Visualize,
-        f"{base_uri}/datasets/<string:dataset>/visualize",
-        endpoint="visualize",
-        resource_class_args=(config,),
-    )


 def get_server_config() -> PbenchServerConfig:
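
For orientation, the registration above exposes DatasetsVisualize at GET {base_uri}/datasets/<string:dataset>/visualize. A minimal sketch of calling the route over HTTP, assuming the conventional /api/v1 base URI, an example host, and bearer-token authentication (none of which appear in this diff):

    import requests

    SERVER = "https://pbench.example.com"  # assumed host
    TOKEN = "example-token"                # assumed bearer token

    # GET the visualization payload for one dataset.
    resp = requests.get(
        f"{SERVER}/api/v1/datasets/uperf_1/visualize",
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    print(resp.json()["status"])  # "success" when Quisby processing succeeds
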
lib/pbench/server/api/resources/{visualize.py → datasets_visualize.py}
@@ -27,7 +27,7 @@
 from pbench.server.database import Dataset


-class Visualize(ApiBase):
+class DatasetsVisualize(ApiBase):
     """
     This class implements the Server API used to retrieve data for visualization.
     """
@@ -78,21 +78,22 @@ def _get(
         benchmark = metadata["dataset.metalog.pbench.script"].upper()
         benchmark_type = BenchmarkName.__members__.get(benchmark)
         if not benchmark_type:
-            raise APIAbort(HTTPStatus.UNSUPPORTED_MEDIA_TYPE, "Unsupported Benchmark")
+            raise APIAbort(
+                HTTPStatus.UNSUPPORTED_MEDIA_TYPE, f"Unsupported Benchmark: {benchmark}"
+            )

         name = Dataset.stem(tarball.tarball_path)
         try:
             file = tarball.extract(tarball.tarball_path, f"{name}/result.csv")
         except TarballUnpackError as e:
             raise APIInternalError(str(e)) from e

-        pquisby_obj = QuisbyProcessing()
-        get_quisby_data = pquisby_obj.extract_data(
+        get_quisby_data = QuisbyProcessing().extract_data(
             benchmark_type, dataset.name, InputType.STREAM, file
         )

         if get_quisby_data["status"] != "success":
             raise APIInternalError(
                 f"Quisby processing failure. Exception: {get_quisby_data['exception']}"
-            ) from None
+            )
         return jsonify(get_quisby_data)
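
The unsupported-benchmark guard above leans on Enum.__members__, which maps member names to members and returns None for unknown names. A standalone illustration of that lookup (the enum members here are an assumed subset, not the real BenchmarkName definition):

    from enum import Enum


    class BenchmarkName(Enum):  # hypothetical subset for illustration
        UPERF = "uperf"
        FIO = "fio"


    # A known, upper-cased script name resolves to a member; anything else
    # yields None, which triggers the UNSUPPORTED_MEDIA_TYPE abort above.
    assert BenchmarkName.__members__.get("UPERF") is BenchmarkName.UPERF
    assert BenchmarkName.__members__.get("HAMMERDB") is None
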
lib/pbench/test/unit/server/test_datasets_visualize.py
@@ -5,6 +5,7 @@
 import pytest
 import requests

+from pbench.server import JSON
 from pbench.server.api.resources import ApiBase
 from pbench.server.cache_manager import CacheManager, Tarball
 from pbench.server.database.models.datasets import Dataset, DatasetNotFound
@@ -50,12 +51,9 @@ def extract(_tarball_path: Path, _path: str) -> str:

         return Tarball

-    def mock_get_dataset_metadata(self, _dataset, _key):
+    def mock_get_dataset_metadata(self, _dataset, _key) -> JSON:
         return {"dataset.metalog.pbench.script": "uperf"}

-    def mock_extract_data(self, test_name, dataset_name, input_type, data):
-        return {"status": "success", "json_data": "quisby_data"}
-
     def test_get_no_dataset(self, query_get_as):
         response = query_get_as("nonexistent-dataset", "drb", HTTPStatus.NOT_FOUND)
         assert response.json == {"message": "Dataset 'nonexistent-dataset' not found"}
@@ -73,28 +71,30 @@ def test_unauthorized_access(self, query_get_as):
         }

     def test_successful_get(self, query_get_as, monkeypatch):
+        def mock_extract_data(self, test_name, dataset_name, input_type, data) -> JSON:
+            return {"status": "success", "json_data": "quisby_data"}
+
         monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
         monkeypatch.setattr(
             ApiBase, "_get_dataset_metadata", self.mock_get_dataset_metadata
         )
-        monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)
+        monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)

         response = query_get_as("uperf_1", "test", HTTPStatus.OK)
         assert response.json["status"] == "success"
         assert response.json["json_data"] == "quisby_data"

     def test_unsuccessful_get_with_incorrect_data(self, query_get_as, monkeypatch):
-        def mock_find_dataset_with_incorrect_data(self, dataset):
+        def mock_find_dataset_with_incorrect_data(self, dataset) -> Tarball:
             class Tarball(object):
                 tarball_path = Path("/dataset/tarball.tar.xz")

-                def extract(tarball_path, path):
+                def extract(tarball_path, path) -> str:
                     return "IncorrectData"

             return Tarball

-        def mock_extract_data(self, test_name, dataset_name, input_type, data):
+        def mock_extract_data(self, test_name, dataset_name, input_type, data) -> JSON:
             return {"status": "failed", "exception": "Unsupported Media Type"}

         monkeypatch.setattr(
@@ -105,16 +105,23 @@ def mock_extract_data(self, test_name, dataset_name, input_type, data):
         )
         monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
         response = query_get_as("uperf_1", "test", HTTPStatus.INTERNAL_SERVER_ERROR)
-        assert response.json.get("message").startswith(
+        assert response.json["message"].startswith(
             "Internal Pbench Server Error: log reference "
         )

     def test_unsupported_benchmark(self, query_get_as, monkeypatch):
-        def mock_get_metadata(self, dataset, key):
+        flag = True
+
+        def mock_extract_data(*args, **kwargs) -> JSON:
+            nonlocal flag
+            flag = False
+
+        def mock_get_metadata(self, dataset, key) -> JSON:
             return {"dataset.metalog.pbench.script": "hammerDB"}

         monkeypatch.setattr(CacheManager, "find_dataset", self.mock_find_dataset)
         monkeypatch.setattr(ApiBase, "_get_dataset_metadata", mock_get_metadata)
-        monkeypatch.setattr(QuisbyProcessing, "extract_data", self.mock_extract_data)
+        monkeypatch.setattr(QuisbyProcessing, "extract_data", mock_extract_data)
         response = query_get_as("uperf_1", "test", HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
-        response.json["message"] == "Unsupported Benchmark"
+        assert response.json["message"] == "Unsupported Benchmark: HAMMERDB"
+        assert flag is True
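
The reworked test_unsupported_benchmark uses a closure flag with nonlocal so it can assert that QuisbyProcessing.extract_data is never reached once the benchmark check aborts. A condensed, standalone sketch of the pattern (all names here are illustrative):

    def run_pipeline(extract, supported: bool):
        # Stand-in for the API flow: abort before extraction when the
        # benchmark is unsupported.
        if not supported:
            return "aborted"
        return extract()


    def test_extract_not_called():
        called = False

        def probe(*args, **kwargs):
            nonlocal called
            called = True

        assert run_pipeline(probe, supported=False) == "aborted"
        assert called is False  # the mock was never invoked


    test_extract_not_called()
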
lib/pbench/test/unit/server/test_endpoint_configure.py (8 changes: 4 additions & 4 deletions)

@@ -101,6 +101,10 @@ def check_config(self, client, server_config, host, my_headers={}):
                         "dataset_view": {"type": "string"},
                     },
                 },
+                "datasets_visualize": {
+                    "template": f"{uri}/datasets/{{dataset}}/visualize",
+                    "params": {"dataset": {"type": "string"}},
+                },
                 "endpoints": {"template": f"{uri}/endpoints", "params": {}},
                 "key": {
                     "template": f"{uri}/key/{{key}}",
@@ -119,10 +123,6 @@ def check_config(self, client, server_config, host, my_headers={}):
                     "template": f"{uri}/upload/{{filename}}",
                     "params": {"filename": {"type": "string"}},
                 },
-                "visualize": {
-                    "template": f"{uri}/datasets/{{dataset}}/visualize",
-                    "params": {"dataset": {"type": "string"}},
-                },
             },
         }
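
These entries are what the endpoints API advertises to clients: URI templates whose parameters expand with ordinary str.format. A small sketch of the expansion (the host is assumed):

    # Expand the advertised template for one dataset.
    template = "https://pbench.example.com/api/v1/datasets/{dataset}/visualize"
    print(template.format(dataset="uperf_1"))
    # -> https://pbench.example.com/api/v1/datasets/uperf_1/visualize
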

