From 6a85bdf9d2864ae92d1612cebf68aefdc464b0e1 Mon Sep 17 00:00:00 2001 From: Alejandro Saucedo Date: Tue, 22 Oct 2019 12:02:24 +0100 Subject: [PATCH] Added python and testing folders as black --- python/seldon_core/__init__.py | 2 +- python/seldon_core/api_tester.py | 67 +- python/seldon_core/flask_utils.py | 37 +- python/seldon_core/metrics.py | 6 +- python/seldon_core/microservice.py | 174 +-- python/seldon_core/microservice_tester.py | 63 +- python/seldon_core/persistence.py | 3 +- python/seldon_core/seldon_client.py | 1055 ++++++++++++----- python/seldon_core/seldon_methods.py | 137 ++- python/seldon_core/serving_test_gen.py | 25 +- python/seldon_core/storage.py | 42 +- python/seldon_core/tf_helper.py | 1 + python/seldon_core/user_model.py | 136 ++- python/seldon_core/utils.py | 123 +- python/seldon_core/version.py | 2 +- python/seldon_core/wrapper.py | 58 +- python/setup.py | 96 +- python/tests/model-template-app/MyModel.py | 2 + .../model-template-app2/mymodule/my_model.py | 2 + python/tests/test_api_tester.py | 49 +- ...test_application_exception_microservice.py | 24 +- python/tests/test_combiner_microservice.py | 70 +- python/tests/test_metrics.py | 17 +- python/tests/test_microservice.py | 178 +-- python/tests/test_microservice_tester.py | 77 +- python/tests/test_model_microservice.py | 189 +-- python/tests/test_router_microservice.py | 49 +- python/tests/test_seldon_client.py | 124 +- python/tests/test_transformer_microservice.py | 113 +- python/tests/test_user_model.py | 31 +- python/tests/test_utils.py | 139 +-- python/tests/utils.py | 3 +- testing/docker/fixed-model/ModelV1.py | 10 +- testing/docker/fixed-model/ModelV2.py | 10 +- testing/s2i/python/combiner/MyCombiner.py | 5 +- testing/s2i/python/model/MyModel.py | 8 +- testing/s2i/python/router/MyRouter.py | 6 +- .../s2i/python/transformer/MyTransformer.py | 7 +- testing/scripts/conftest.py | 3 + testing/scripts/k8s_utils.py | 9 +- testing/scripts/s2i_utils.py | 12 +- testing/scripts/seldon_utils.py | 151 ++- testing/scripts/test_bad_graphs.py | 23 +- .../scripts/test_helm_charts_clusterwide.py | 30 +- testing/scripts/test_prepackaged_servers.py | 48 +- testing/scripts/test_rolling_updates.py | 179 ++- testing/scripts/test_s2i_python.py | 41 +- 47 files changed, 2341 insertions(+), 1295 deletions(-) diff --git a/python/seldon_core/__init__.py b/python/seldon_core/__init__.py index 1f370b1d23..fc86926a3f 100644 --- a/python/seldon_core/__init__.py +++ b/python/seldon_core/__init__.py @@ -1,2 +1,2 @@ from seldon_core.version import __version__ -from .storage import Storage \ No newline at end of file +from .storage import Storage diff --git a/python/seldon_core/api_tester.py b/python/seldon_core/api_tester.py index d0a08ba8ce..62fc4ea5e6 100644 --- a/python/seldon_core/api_tester.py +++ b/python/seldon_core/api_tester.py @@ -28,17 +28,26 @@ def get_seldon_client(args) -> SeldonClient: else: seldon_grpc_endpoint = None seldon_rest_endpoint = endpoint - sc = SeldonClient(gateway="seldon", seldon_rest_endpoint=seldon_rest_endpoint, - seldon_grpc_endpoint=seldon_grpc_endpoint, - oauth_key=args.oauth_key, oauth_secret=args.oauth_secret) + sc = SeldonClient( + gateway="seldon", + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + oauth_key=args.oauth_key, + oauth_secret=args.oauth_secret, + ) else: gateway_endpoint = endpoint if args.grpc: transport = "grpc" else: transport = "rest" - sc = SeldonClient(gateway="ambassador", gateway_endpoint=gateway_endpoint, transport=transport, - 
deployment_name=args.deployment, namespace=args.namespace) + sc = SeldonClient( + gateway="ambassador", + gateway_endpoint=gateway_endpoint, + transport=transport, + deployment_name=args.deployment, + namespace=args.namespace, + ) return sc @@ -52,7 +61,7 @@ def run_send_feedback(args): Command line args """ - contract = json.load(open(args.contract, 'r')) + contract = json.load(open(args.contract, "r")) contract = unfold_contract(contract) sc = get_seldon_client(args) if args.grpc: @@ -61,11 +70,15 @@ def run_send_feedback(args): transport = "rest" for i in range(args.n_requests): - batch = generate_batch(contract, args.batch_size, 'features') + batch = generate_batch(contract, args.batch_size, "features") response_predict = sc.predict(data=batch, deployment_name=args.deployment) - response_feedback = sc.feedback(prediction_request=response_predict.request, - prediction_response=response_predict.response, reward=1.0, - deployment_name=args.deployment, transport=transport) + response_feedback = sc.feedback( + prediction_request=response_predict.request, + prediction_response=response_predict.response, + reward=1.0, + deployment_name=args.deployment, + transport=transport, + ) if args.prnt: print(f"RECEIVED RESPONSE:\n{response_feedback}\n") @@ -80,7 +93,7 @@ def run_predict(args): Command line args """ - contract = json.load(open(args.contract, 'r')) + contract = json.load(open(args.contract, "r")) contract = unfold_contract(contract) feature_names = [feature["name"] for feature in contract["features"]] @@ -92,36 +105,50 @@ def run_predict(args): payload_type = "tensor" if args.tensor else "ndarray" for i in range(args.n_requests): - batch = generate_batch(contract, args.batch_size, 'features') + batch = generate_batch(contract, args.batch_size, "features") if args.prnt: print(f"{'-' * 40}\nSENDING NEW REQUEST:\n") print(batch) - response_predict = sc.predict(data=batch, deployment_name=args.deployment, names=feature_names, payload_type=payload_type) + response_predict = sc.predict( + data=batch, + deployment_name=args.deployment, + names=feature_names, + payload_type=payload_type, + ) if args.prnt: print(f"RECEIVED RESPONSE:\n{response_predict.response}\n") def main(): parser = argparse.ArgumentParser() - parser.add_argument("contract", type=str, - help="File that contains the data contract") + parser.add_argument( + "contract", type=str, help="File that contains the data contract" + ) parser.add_argument("host", type=str) parser.add_argument("port", type=int) - parser.add_argument("deployment", type=str, nargs='?', default="mymodel") - parser.add_argument("--endpoint", type=str, choices=["predict", "send-feedback"], default="predict") + parser.add_argument("deployment", type=str, nargs="?", default="mymodel") + parser.add_argument( + "--endpoint", type=str, choices=["predict", "send-feedback"], default="predict" + ) parser.add_argument("-b", "--batch-size", type=int, default=1) parser.add_argument("-n", "--n-requests", type=int, default=1) parser.add_argument("--grpc", action="store_true") parser.add_argument("-t", "--tensor", action="store_true") - parser.add_argument("-p", "--prnt", action="store_true", help="Prints requests and responses") - parser.add_argument("--log-level", type=str, choices=["DEBUG", "INFO", "ERROR"], default="ERROR") + parser.add_argument( + "-p", "--prnt", action="store_true", help="Prints requests and responses" + ) + parser.add_argument( + "--log-level", type=str, choices=["DEBUG", "INFO", "ERROR"], default="ERROR" + ) parser.add_argument("--namespace", 
type=str) parser.add_argument("--oauth-port", type=int) parser.add_argument("--oauth-key") parser.add_argument("--oauth-secret") args = parser.parse_args() - LOG_FORMAT = '%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s' + LOG_FORMAT = ( + "%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s" + ) if args.log_level == "DEBUG": log_level = logging.DEBUG elif args.log_level == "INFO": diff --git a/python/seldon_core/flask_utils.py b/python/seldon_core/flask_utils.py index 690843e0fd..91f9fd99c5 100644 --- a/python/seldon_core/flask_utils.py +++ b/python/seldon_core/flask_utils.py @@ -3,6 +3,7 @@ from typing import Dict import base64 + def get_multi_form_data_request() -> Dict: """ Parses a request submitted with Content-type:multipart/form-data @@ -18,23 +19,24 @@ def get_multi_form_data_request() -> Dict: """ req_dict = {} for key in request.form: - if key == 'strData': - req_dict[key]=request.form.get(key) + if key == "strData": + req_dict[key] = request.form.get(key) else: - req_dict[key]=json.loads(request.form.get(key)) + req_dict[key] = json.loads(request.form.get(key)) for fileKey in request.files: """ The bytes data needs to be base64 encode because the protobuf trys to do base64 decode for bytes """ - if fileKey == 'binData': - req_dict[fileKey]=base64.b64encode(request.files[fileKey].read()) + if fileKey == "binData": + req_dict[fileKey] = base64.b64encode(request.files[fileKey].read()) else: """ This is the case when strData can be passed as file as well """ - req_dict[fileKey]=request.files[fileKey].read().decode('utf-8') + req_dict[fileKey] = request.files[fileKey].read().decode("utf-8") return req_dict + def get_request() -> Dict: """ Parse a request to get JSON dict @@ -45,14 +47,17 @@ def get_request() -> Dict: """ - if request.content_type is not None and 'multipart/form-data' in request.content_type: + if ( + request.content_type is not None + and "multipart/form-data" in request.content_type + ): return get_multi_form_data_request() j_str = request.form.get("json") if j_str: message = json.loads(j_str) else: - j_str = request.args.get('json') + j_str = request.args.get("json") if j_str: message = json.loads(j_str) else: @@ -67,7 +72,9 @@ def get_request() -> Dict: class SeldonMicroserviceException(Exception): status_code = 400 - def __init__(self, message, status_code=None, payload=None, reason="MICROSERVICE_BAD_DATA"): + def __init__( + self, message, status_code=None, payload=None, reason="MICROSERVICE_BAD_DATA" + ): Exception.__init__(self) self.message = message if status_code is not None: @@ -76,10 +83,16 @@ def __init__(self, message, status_code=None, payload=None, reason="MICROSERVICE self.reason = reason def to_dict(self): - rv = {"status": {"status": 1, "info": self.message, - "code": -1, "reason": self.reason}} + rv = { + "status": { + "status": 1, + "info": self.message, + "code": -1, + "reason": self.reason, + } + } return rv ANNOTATIONS_FILE = "/etc/podinfo/annotations" -ANNOTATION_GRPC_MAX_MSG_SIZE = 'seldon.io/grpc-max-message-size' +ANNOTATION_GRPC_MAX_MSG_SIZE = "seldon.io/grpc-max-message-size" diff --git a/python/seldon_core/metrics.py b/python/seldon_core/metrics.py index 6ccc581a8e..86f5f7f4b3 100644 --- a/python/seldon_core/metrics.py +++ b/python/seldon_core/metrics.py @@ -78,7 +78,11 @@ def validate_metrics(metrics: List[Dict]) -> bool: for metric in metrics: if not ("key" in metric and "value" in metric and "type" in metric): return False - if not (metric["type"] == COUNTER or metric["type"] == 
GAUGE or metric["type"] == TIMER): + if not ( + metric["type"] == COUNTER + or metric["type"] == GAUGE + or metric["type"] == TIMER + ): return False try: metric["value"] + 1 diff --git a/python/seldon_core/microservice.py b/python/seldon_core/microservice.py index 684849e39d..c44e6abb27 100644 --- a/python/seldon_core/microservice.py +++ b/python/seldon_core/microservice.py @@ -64,7 +64,7 @@ def parse_parameters(parameters: Dict) -> Dict: "FLOAT": float, "DOUBLE": float, "STRING": str, - "BOOL": bool + "BOOL": bool, } parsed_parameters = {} for param in parameters: @@ -78,12 +78,21 @@ def parse_parameters(parameters: Dict) -> Dict: parsed_parameters[name] = type_dict[type_](value) except ValueError: raise SeldonMicroserviceException( - "Bad model parameter: " + name + " with value " + value + " can't be parsed as a " + type_, - reason="MICROSERVICE_BAD_PARAMETER") + "Bad model parameter: " + + name + + " with value " + + value + + " can't be parsed as a " + + type_, + reason="MICROSERVICE_BAD_PARAMETER", + ) except KeyError: raise SeldonMicroserviceException( - "Bad model parameter type: " + type_ + " valid are INT, FLOAT, DOUBLE, STRING, BOOL", - reason="MICROSERVICE_BAD_PARAMETER") + "Bad model parameter type: " + + type_ + + " valid are INT, FLOAT, DOUBLE, STRING, BOOL", + reason="MICROSERVICE_BAD_PARAMETER", + ) return parsed_parameters @@ -123,15 +132,12 @@ def setup_tracing(interface_name: str) -> object: logger.info("Using default tracing config") config = Config( config={ # usually read from some yaml config - 'sampler': { - 'type': 'const', - 'param': 1, + "sampler": {"type": "const", "param": 1}, + "local_agent": { + "reporting_host": jaeger_serv, + "reporting_port": jaeger_port, }, - 'local_agent': { - 'reporting_host': jaeger_serv, - 'reporting_port': jaeger_port, - }, - 'logging': True, + "logging": True, }, service_name=interface_name, validate=True, @@ -139,32 +145,36 @@ def setup_tracing(interface_name: str) -> object: else: logger.info("Loading tracing config from %s", jaeger_config) import yaml - with open(jaeger_config, 'r') as stream: + + with open(jaeger_config, "r") as stream: config_dict = yaml.load(stream) config = Config( - config=config_dict, - service_name=interface_name, - validate=True, + config=config_dict, service_name=interface_name, validate=True ) # this call also sets opentracing.tracer return config.initialize_tracer() -class StandaloneApplication(gunicorn.app.base.BaseApplication): - def __init__(self, app,user_object,options:Dict = None): +class StandaloneApplication(gunicorn.app.base.BaseApplication): + def __init__(self, app, user_object, options: Dict = None): self.application = app self.user_object = user_object self.options = options super(StandaloneApplication, self).__init__() def load_config(self): - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) + config = dict( + [ + (key, value) + for key, value in iteritems(self.options) + if key in self.cfg.settings and value is not None + ] + ) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): - logger.debug("LOADING APP %d",os.getpid()) + logger.debug("LOADING APP %d", os.getpid()) try: logger.debug("Calling user load method") self.user_object.load() @@ -174,50 +184,72 @@ def load(self): def main(): - LOG_FORMAT = '%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s' + LOG_FORMAT = ( + "%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s" + ) 
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) - logger.info('Starting microservice.py:main') + logger.info("Starting microservice.py:main") sys.path.append(os.getcwd()) parser = argparse.ArgumentParser() - parser.add_argument("interface_name", type=str, - help="Name of the user interface.") + parser.add_argument("interface_name", type=str, help="Name of the user interface.") parser.add_argument("api_type", type=str, choices=["REST", "GRPC", "FBS"]) - parser.add_argument("--service-type", type=str, choices=[ - "MODEL", "ROUTER", "TRANSFORMER", "COMBINER", "OUTLIER_DETECTOR"], default="MODEL") - parser.add_argument("--persistence", nargs='?', default=0, const=1, type=int) - parser.add_argument("--parameters", type=str, - default=os.environ.get(PARAMETERS_ENV_NAME, "[]")) + parser.add_argument( + "--service-type", + type=str, + choices=["MODEL", "ROUTER", "TRANSFORMER", "COMBINER", "OUTLIER_DETECTOR"], + default="MODEL", + ) + parser.add_argument("--persistence", nargs="?", default=0, const=1, type=int) + parser.add_argument( + "--parameters", type=str, default=os.environ.get(PARAMETERS_ENV_NAME, "[]") + ) parser.add_argument("--log-level", type=str, default="INFO") - parser.add_argument("--tracing", nargs='?', - default=int(os.environ.get("TRACING", "0")), const=1, type=int) + parser.add_argument( + "--tracing", + nargs="?", + default=int(os.environ.get("TRACING", "0")), + const=1, + type=int, + ) # gunicorn settings, defaults are from http://docs.gunicorn.org/en/stable/settings.html - parser.add_argument("--workers", - type=int, - default=int(os.environ.get("GUNICORN_WORKERS", "1")), - help="Number of gunicorn workers for handling requests.") - parser.add_argument("--max-requests", - type=int, - default=int(os.environ.get("GUNICORN_MAX_REQUESTS", "0")), - help="Maximum number of requests gunicorn worker will process before restarting.") - parser.add_argument("--max-requests-jitter", - type=int, - default=int(os.environ.get("GUNICORN_MAX_REQUESTS_JITTER", "0")), - help="Maximum random jitter to add to max-requests.") + parser.add_argument( + "--workers", + type=int, + default=int(os.environ.get("GUNICORN_WORKERS", "1")), + help="Number of gunicorn workers for handling requests.", + ) + parser.add_argument( + "--max-requests", + type=int, + default=int(os.environ.get("GUNICORN_MAX_REQUESTS", "0")), + help="Maximum number of requests gunicorn worker will process before restarting.", + ) + parser.add_argument( + "--max-requests-jitter", + type=int, + default=int(os.environ.get("GUNICORN_MAX_REQUESTS_JITTER", "0")), + help="Maximum random jitter to add to max-requests.", + ) args = parser.parse_args() parameters = parse_parameters(json.loads(args.parameters)) # set flask trace jaeger extra tags - jaeger_extra_tags = list(filter(lambda x: (x != ""), [tag.strip() for tag in os.environ.get("JAEGER_EXTRA_TAGS", "").split(",")])) - logger.info('Parse JAEGER_EXTRA_TAGS %s', jaeger_extra_tags) + jaeger_extra_tags = list( + filter( + lambda x: (x != ""), + [tag.strip() for tag in os.environ.get("JAEGER_EXTRA_TAGS", "").split(",")], + ) + ) + logger.info("Parse JAEGER_EXTRA_TAGS %s", jaeger_extra_tags) # set up log level log_level_raw = os.environ.get(LOG_LEVEL_ENV, args.log_level.upper()) log_level_num = getattr(logging, log_level_raw, None) if not isinstance(log_level_num, int): - raise ValueError('Invalid log level: %s', args.log_level) + raise ValueError("Invalid log level: %s", args.log_level) logger.setLevel(log_level_num) logger.debug("Log level set to %s:%s", args.log_level, 
log_level_num) @@ -227,16 +259,16 @@ def main(): parts = args.interface_name.rsplit(".", 1) if len(parts) == 1: - logger.info("Importing %s",args.interface_name) + logger.info("Importing %s", args.interface_name) interface_file = importlib.import_module(args.interface_name) user_class = getattr(interface_file, args.interface_name) else: - logger.info("Importing submodule %s",parts) + logger.info("Importing submodule %s", parts) interface_file = importlib.import_module(parts[0]) user_class = getattr(interface_file, parts[1]) if args.persistence: - logger.info('Restoring persisted component') + logger.info("Restoring persisted component") user_object = persistence.restore(user_class, parameters) persistence.persist(user_object, parameters.get("push_frequency")) else: @@ -253,25 +285,23 @@ def main(): if args.tracing: tracer = setup_tracing(args.interface_name) - - - if args.api_type == "REST": if args.workers > 1: + def rest_prediction_server(): options = { - 'bind': '%s:%s' % ('0.0.0.0', port), - 'access_logfile': '-', - 'loglevel': 'info', - 'timeout': 5000, - 'reload': 'true', - 'workers': args.workers, - 'max_requests': args.max_requests, - 'max_requests_jitter': args.max_requests_jitter, + "bind": "%s:%s" % ("0.0.0.0", port), + "access_logfile": "-", + "loglevel": "info", + "timeout": 5000, + "reload": "true", + "workers": args.workers, + "max_requests": args.max_requests, + "max_requests_jitter": args.max_requests_jitter, } app = seldon_microservice.get_rest_microservice(user_object) - StandaloneApplication(app,user_object,options=options).run() + StandaloneApplication(app, user_object, options=options).run() logger.info("REST gunicorn microservice running on port %i", port) server1_func = rest_prediction_server @@ -285,28 +315,32 @@ def rest_prediction_server(): except (NotImplementedError, AttributeError): pass if args.tracing: - logger.info('Tracing branch is active') + logger.info("Tracing branch is active") from flask_opentracing import FlaskTracer - logger.info('Set JAEGER_EXTRA_TAGS %s', jaeger_extra_tags) + + logger.info("Set JAEGER_EXTRA_TAGS %s", jaeger_extra_tags) tracing = FlaskTracer(tracer, True, app, jaeger_extra_tags) - app.run(host='0.0.0.0', port=port) + app.run(host="0.0.0.0", port=port) logger.info("REST microservice running on port %i", port) server1_func = rest_prediction_server elif args.api_type == "GRPC": + def grpc_prediction_server(): if args.tracing: from grpc_opentracing import open_tracing_server_interceptor + logger.info("Adding tracer") interceptor = open_tracing_server_interceptor(tracer) else: interceptor = None server = seldon_microservice.get_grpc_server( - user_object, annotations=annotations, trace_interceptor=interceptor) + user_object, annotations=annotations, trace_interceptor=interceptor + ) try: user_object.load() @@ -326,12 +360,14 @@ def grpc_prediction_server(): else: server1_func = None - if hasattr(user_object, 'custom_service') and callable(getattr(user_object, 'custom_service')): + if hasattr(user_object, "custom_service") and callable( + getattr(user_object, "custom_service") + ): server2_func = user_object.custom_service else: server2_func = None - logger.info('Starting servers') + logger.info("Starting servers") start_servers(server1_func, server2_func) diff --git a/python/seldon_core/microservice_tester.py b/python/seldon_core/microservice_tester.py index e79e478ed7..751bc9f125 100644 --- a/python/seldon_core/microservice_tester.py +++ b/python/seldon_core/microservice_tester.py @@ -12,7 +12,9 @@ def __init__(self, message): 
super().__init__(message) -def gen_continuous(f_range: Tuple[Union[float, str], Union[float, str]], n: int) -> np.ndarray: +def gen_continuous( + f_range: Tuple[Union[float, str], Union[float, str]], n: int +) -> np.ndarray: """ Create a continuous feature based on given range @@ -105,7 +107,7 @@ def generate_batch(contract: Dict, n: int, field: str) -> np.ndarray: if len(ty_set) == 1: return np.concatenate(feature_batches, axis=1) else: - out = np.empty((n, len(contract['features'])), dtype=object) + out = np.empty((n, len(contract["features"])), dtype=object) return np.concatenate(feature_batches, axis=1, out=out) @@ -158,6 +160,7 @@ def get_class_names(contract: Dict) -> List[str]: names.append(feature["name"]) return names + def run_send_feedback(args): """ Make a feedback call to microservice @@ -168,15 +171,15 @@ def run_send_feedback(args): Command line args """ - contract = json.load(open(args.contract, 'r')) + contract = json.load(open(args.contract, "r")) contract = unfold_contract(contract) endpoint = args.host + ":" + str(args.port) sc = SeldonClient(microservice_endpoint=endpoint) for i in range(args.n_requests): - batch = generate_batch(contract, args.batch_size, 'features') + batch = generate_batch(contract, args.batch_size, "features") if args.prnt: - print('-' * 40) + print("-" * 40) print("SENDING NEW REQUEST:") if not args.grpc: @@ -189,10 +192,15 @@ def run_send_feedback(args): else: payload_type = "ndarray" - response_predict = sc.microservice(data=batch, transport=transport, payload_type=payload_type, method="predict") - response_feedback = sc.microservice_feedback(prediction_request=response_predict.request, - prediction_response=response_predict.response, reward=1.0, - transport=transport) + response_predict = sc.microservice( + data=batch, transport=transport, payload_type=payload_type, method="predict" + ) + response_feedback = sc.microservice_feedback( + prediction_request=response_predict.request, + prediction_response=response_predict.response, + reward=1.0, + transport=transport, + ) if args.prnt: print(f"RECEIVED RESPONSE:\n{response_feedback}\n") @@ -207,14 +215,14 @@ def run_method(args, method): Command line args """ - contract = json.load(open(args.contract, 'r')) + contract = json.load(open(args.contract, "r")) contract = unfold_contract(contract) feature_names = [feature["name"] for feature in contract["features"]] endpoint = f"{args.host}:{args.port}" sc = SeldonClient(microservice_endpoint=endpoint) for i in range(args.n_requests): - batch: ndarray = generate_batch(contract, args.batch_size, 'features') + batch: ndarray = generate_batch(contract, args.batch_size, "features") if args.prnt: print(f"{'-' * 40}\nSENDING NEW REQUEST:\n") print(batch) @@ -222,7 +230,13 @@ def run_method(args, method): transport = "grpc" if args.grpc else "rest" payload_type = "tensor" if args.tensor else "ndarray" - response = sc.microservice(data=batch, transport=transport, method=method, payload_type=payload_type, names=feature_names) + response = sc.microservice( + data=batch, + transport=transport, + method=method, + payload_type=payload_type, + names=feature_names, + ) if args.prnt: print(f"RECEIVED RESPONSE:\n{response.response}\n") @@ -230,22 +244,33 @@ def run_method(args, method): def main(): parser = argparse.ArgumentParser() - parser.add_argument("contract", type=str, - help="File that contains the data contract") + parser.add_argument( + "contract", type=str, help="File that contains the data contract" + ) parser.add_argument("host", type=str) 
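    # Illustrative annotation (assumption, not in the patch): with the usual
    # console-script entry point this tester is driven as, e.g.,
    #   seldon-core-microservice-tester contract.json 0.0.0.0 5000 -p
    # (script name, contract path and arguments are illustrative).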
parser.add_argument("port", type=int) - parser.add_argument("--endpoint", type=str, - choices=["predict", "send-feedback", "transform-input"], default="predict") + parser.add_argument( + "--endpoint", + type=str, + choices=["predict", "send-feedback", "transform-input"], + default="predict", + ) parser.add_argument("-b", "--batch-size", type=int, default=1) parser.add_argument("-n", "--n-requests", type=int, default=1) parser.add_argument("--grpc", action="store_true") parser.add_argument("-t", "--tensor", action="store_true") - parser.add_argument("-p", "--prnt", action="store_true", help="Prints requests and responses") - parser.add_argument("--log-level", type=str, choices=["DEBUG", "INFO", "ERROR"], default="ERROR") + parser.add_argument( + "-p", "--prnt", action="store_true", help="Prints requests and responses" + ) + parser.add_argument( + "--log-level", type=str, choices=["DEBUG", "INFO", "ERROR"], default="ERROR" + ) args = parser.parse_args() - LOG_FORMAT = '%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s' + LOG_FORMAT = ( + "%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s" + ) if args.log_level == "DEBUG": log_level = logging.DEBUG elif args.log_level == "INFO": diff --git a/python/seldon_core/persistence.py b/python/seldon_core/persistence.py index 794ca0eb40..81d5a27690 100644 --- a/python/seldon_core/persistence.py +++ b/python/seldon_core/persistence.py @@ -13,7 +13,7 @@ PREDICTOR_ID = os.environ.get("PREDICTOR_ID", "0") DEPLOYMENT_ID = os.environ.get("SELDON_DEPLOYMENT_ID", "0") REDIS_KEY = f"persistence_{DEPLOYMENT_ID}_{PREDICTOR_ID}_{PRED_UNIT_ID}" -REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST', 'localhost') +REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", 6379) DEFAULT_PUSH_FREQUENCY = 60 @@ -66,7 +66,6 @@ def persist(user_object: SeldonComponent, push_frequency: int = None): class PersistenceThread(threading.Thread): - def __init__(self, user_object, push_frequency): self.user_object = user_object self.push_frequency = push_frequency diff --git a/python/seldon_core/seldon_client.py b/python/seldon_core/seldon_client.py index 2ebd432d07..9b26674203 100644 --- a/python/seldon_core/seldon_client.py +++ b/python/seldon_core/seldon_client.py @@ -1,7 +1,12 @@ from seldon_core.proto import prediction_pb2 from seldon_core.proto import prediction_pb2_grpc -from seldon_core.utils import array_to_grpc_datadef, seldon_message_to_json, \ - json_to_seldon_message, feedback_to_json, seldon_messages_to_json +from seldon_core.utils import ( + array_to_grpc_datadef, + seldon_message_to_json, + json_to_seldon_message, + feedback_to_json, + seldon_messages_to_json, +) import numpy as np import grpc import requests @@ -18,12 +23,14 @@ class SeldonClientException(Exception): """ Seldon Client Exception """ + status_code = 400 def __init__(self, message): Exception.__init__(self) self.message = message + class SeldonChannelCredentials(object): """ Channel credentials @@ -35,15 +42,19 @@ class SeldonChannelCredentials(object): for the root_certificates_file otherwise it may not work as expected. 
""" - def __init__(self, verify: bool = True, root_certificates_file: str = None, - private_key_file: str = None, certificate_chain_file: str = None): + def __init__( + self, + verify: bool = True, + root_certificates_file: str = None, + private_key_file: str = None, + certificate_chain_file: str = None, + ): self.verify = verify self.root_certificates_file = root_certificates_file self.private_key_file = private_key_file self.certificate_chain_file = certificate_chain_file - class SeldonCallCredentials(object): """ Credentials for each call, currently implements the ability to provide @@ -51,17 +62,22 @@ class SeldonCallCredentials(object): the X-Auth-Token header, and via GRPC via the metadata call creds. """ - def __init__(self,token:str = None): + def __init__(self, token: str = None): self.token = token + class SeldonClientPrediction(object): """ Data class to return from Seldon Client """ - def __init__(self, request: Optional[prediction_pb2.SeldonMessage], - response: Optional[prediction_pb2.SeldonMessage], - success: bool = True, msg: str = ""): + def __init__( + self, + request: Optional[prediction_pb2.SeldonMessage], + response: Optional[prediction_pb2.SeldonMessage], + success: bool = True, + msg: str = "", + ): self.request = request self.response = response self.success = success @@ -69,7 +85,11 @@ def __init__(self, request: Optional[prediction_pb2.SeldonMessage], def __repr__(self): return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % ( - self.success, self.msg, self.request, self.response) + self.success, + self.msg, + self.request, + self.response, + ) class SeldonClientFeedback(object): @@ -77,9 +97,13 @@ class SeldonClientFeedback(object): Data class to return from Seldon Client for feedback calls """ - def __init__(self, request: Optional[prediction_pb2.Feedback], response: Optional[prediction_pb2.SeldonMessage], - success: bool = True, - msg: str = ""): + def __init__( + self, + request: Optional[prediction_pb2.Feedback], + response: Optional[prediction_pb2.SeldonMessage], + success: bool = True, + msg: str = "", + ): self.request = request self.response = response self.success = success @@ -87,7 +111,11 @@ def __init__(self, request: Optional[prediction_pb2.Feedback], response: Optiona def __repr__(self): return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % ( - self.success, self.msg, self.request, self.response) + self.success, + self.msg, + self.request, + self.response, + ) class SeldonClientCombine(object): @@ -95,9 +123,13 @@ class SeldonClientCombine(object): Data class to return from Seldon Client for aggregate calls """ - def __init__(self, request: Optional[prediction_pb2.SeldonMessageList], - response: Optional[prediction_pb2.SeldonMessage], - success: bool = True, msg: str = ""): + def __init__( + self, + request: Optional[prediction_pb2.SeldonMessageList], + response: Optional[prediction_pb2.SeldonMessage], + success: bool = True, + msg: str = "", + ): self.request = request self.response = response self.success = success @@ -105,7 +137,11 @@ def __init__(self, request: Optional[prediction_pb2.SeldonMessageList], def __repr__(self): return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % ( - self.success, self.msg, self.request, self.response) + self.success, + self.msg, + self.request, + self.response, + ) class SeldonClient(object): @@ -113,15 +149,25 @@ class SeldonClient(object): A reference Seldon API Client """ - def __init__(self, gateway: str = "ambassador", transport: str = "rest", namespace: str = None, - deployment_name: 
str = None, - payload_type: str = "tensor", oauth_key: str = None, oauth_secret: str = None, - seldon_rest_endpoint: str = "localhost:8002", seldon_grpc_endpoint: str = "localhost:8004", - gateway_endpoint: str = "localhost:8003", microservice_endpoint: str = "localhost:5000", - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - channel_credentials: SeldonChannelCredentials =None, - call_credentials: SeldonCallCredentials = None, debug = False): + def __init__( + self, + gateway: str = "ambassador", + transport: str = "rest", + namespace: str = None, + deployment_name: str = None, + payload_type: str = "tensor", + oauth_key: str = None, + oauth_secret: str = None, + seldon_rest_endpoint: str = "localhost:8002", + seldon_grpc_endpoint: str = "localhost:8004", + gateway_endpoint: str = "localhost:8003", + microservice_endpoint: str = "localhost:5000", + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + channel_credentials: SeldonChannelCredentials = None, + call_credentials: SeldonCallCredentials = None, + debug=False, + ): """ Parameters @@ -166,8 +212,14 @@ def _gather_args(self, **kwargs): c2.update({k: v for k, v in kwargs.items() if v is not None}) return c2 - def _validate_args(self, gateway: str = None, transport: str = None, - method: str = None, data: np.ndarray = None, **kwargs): + def _validate_args( + self, + gateway: str = None, + transport: str = None, + method: str = None, + data: np.ndarray = None, + **kwargs, + ): """ Internal method to validate parameters @@ -188,23 +240,51 @@ def _validate_args(self, gateway: str = None, transport: str = None, """ if not (gateway == "ambassador" or gateway == "seldon" or gateway == "istio"): - raise SeldonClientException("Valid values for gateway are 'ambassador', 'istio', or 'seldon'") + raise SeldonClientException( + "Valid values for gateway are 'ambassador', 'istio', or 'seldon'" + ) if not (transport == "rest" or transport == "grpc"): - raise SeldonClientException("Valid values for transport are 'rest' or 'grpc'") - if not (method == "predict" or method == "route" or method == "aggregate" or method == "transform-input" or - method == "transform-output" or method == "send-feedback" or method is None): raise SeldonClientException( - "Valid values for method are 'predict', 'route', 'transform-input', 'transform-output', 'aggregate' or None") + "Valid values for transport are 'rest' or 'grpc'" + ) + if not ( + method == "predict" + or method == "route" + or method == "aggregate" + or method == "transform-input" + or method == "transform-output" + or method == "send-feedback" + or method is None + ): + raise SeldonClientException( + "Valid values for method are 'predict', 'route', 'transform-input', 'transform-output', 'aggregate' or None" + ) if not (data is None or isinstance(data, np.ndarray)): raise SeldonClientException("Valid values for data are None or numpy array") - def predict(self, gateway: str = None, transport: str = None, deployment_name: str = None, - payload_type: str = None, oauth_key: str = None, oauth_secret: str = None, - seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None, - gateway_endpoint: str = None, microservice_endpoint: str = None, - method: str = None, shape: Tuple = (1, 1), namespace: str = None, data: np.ndarray = None, - bin_data: Union[bytes, bytearray] = None, str_data: str = None, names: Iterable[str] = None, - gateway_prefix: str = None, headers: Dict = None, 
http_path: str = None) -> SeldonClientPrediction: + def predict( + self, + gateway: str = None, + transport: str = None, + deployment_name: str = None, + payload_type: str = None, + oauth_key: str = None, + oauth_secret: str = None, + seldon_rest_endpoint: str = None, + seldon_grpc_endpoint: str = None, + gateway_endpoint: str = None, + microservice_endpoint: str = None, + method: str = None, + shape: Tuple = (1, 1), + namespace: str = None, + data: np.ndarray = None, + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + names: Iterable[str] = None, + gateway_prefix: str = None, + headers: Dict = None, + http_path: str = None, + ) -> SeldonClientPrediction: """ Parameters @@ -254,14 +334,28 @@ def predict(self, gateway: str = None, transport: str = None, deployment_name: s ------- """ - k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name, - payload_type=payload_type, oauth_key=oauth_key, - oauth_secret=oauth_secret, seldon_rest_endpoint=seldon_rest_endpoint, - seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint, - microservice_endpoint=microservice_endpoint, method=method, shape=shape, - namespace=namespace, names=names, - data=data, bin_data=bin_data, str_data=str_data, - gateway_prefix=gateway_prefix, headers=headers, http_path=http_path) + k = self._gather_args( + gateway=gateway, + transport=transport, + deployment_name=deployment_name, + payload_type=payload_type, + oauth_key=oauth_key, + oauth_secret=oauth_secret, + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + gateway_endpoint=gateway_endpoint, + microservice_endpoint=microservice_endpoint, + method=method, + shape=shape, + namespace=namespace, + names=names, + data=data, + bin_data=bin_data, + str_data=str_data, + gateway_prefix=gateway_prefix, + headers=headers, + http_path=http_path, + ) self._validate_args(**k) if k["gateway"] == "ambassador" or k["gateway"] == "istio": if k["transport"] == "rest": @@ -280,14 +374,26 @@ def predict(self, gateway: str = None, transport: str = None, deployment_name: s else: raise SeldonClientException("Unknown gateway " + k["gateway"]) - def feedback(self, prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - gateway: str = None, transport: str = None, deployment_name: str = None, - payload_type: str = None, oauth_key: str = None, oauth_secret: str = None, - seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None, - gateway_endpoint: str = None, microservice_endpoint: str = None, - method: str = None, shape: Tuple = (1, 1), namespace: str = None, - gateway_prefix: str = None) -> SeldonClientFeedback: + def feedback( + self, + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + gateway: str = None, + transport: str = None, + deployment_name: str = None, + payload_type: str = None, + oauth_key: str = None, + oauth_secret: str = None, + seldon_rest_endpoint: str = None, + seldon_grpc_endpoint: str = None, + gateway_endpoint: str = None, + microservice_endpoint: str = None, + method: str = None, + shape: Tuple = (1, 1), + namespace: str = None, + gateway_prefix: str = None, + ) -> SeldonClientFeedback: """ Parameters @@ -333,40 +439,69 @@ def feedback(self, prediction_request: prediction_pb2.SeldonMessage = None, ------- """ - k = self._gather_args(gateway=gateway, transport=transport, 
deployment_name=deployment_name, - payload_type=payload_type, oauth_key=oauth_key, oauth_secret=oauth_secret, - seldon_rest_endpoint=seldon_rest_endpoint - , seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint, - microservice_endpoint=microservice_endpoint, method=method, shape=shape, - namespace=namespace, gateway_prefix=gateway_prefix) + k = self._gather_args( + gateway=gateway, + transport=transport, + deployment_name=deployment_name, + payload_type=payload_type, + oauth_key=oauth_key, + oauth_secret=oauth_secret, + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + gateway_endpoint=gateway_endpoint, + microservice_endpoint=microservice_endpoint, + method=method, + shape=shape, + namespace=namespace, + gateway_prefix=gateway_prefix, + ) self._validate_args(**k) if k["gateway"] == "ambassador" or k["gateway"] == "istio": if k["transport"] == "rest": - return rest_feedback_gateway(prediction_request, prediction_response, reward, **k) + return rest_feedback_gateway( + prediction_request, prediction_response, reward, **k + ) elif k["transport"] == "grpc": - return grpc_feedback_gateway(prediction_request, prediction_response, reward, **k) + return grpc_feedback_gateway( + prediction_request, prediction_response, reward, **k + ) else: raise SeldonClientException("Unknown transport " + k["transport"]) elif k["gateway"] == "seldon": if k["transport"] == "rest": - return rest_feedback_seldon_oauth(prediction_request, prediction_response, reward, **k) + return rest_feedback_seldon_oauth( + prediction_request, prediction_response, reward, **k + ) elif k["transport"] == "grpc": - return grpc_feedback_seldon_oauth(prediction_request, prediction_response, reward, **k) + return grpc_feedback_seldon_oauth( + prediction_request, prediction_response, reward, **k + ) else: raise SeldonClientException("Unknown transport " + k["transport"]) else: raise SeldonClientException("Unknown gateway " + k["gateway"]) - def explain(self, gateway: str = None, transport: str = None, deployment_name: str = None, - payload_type: str = None, - seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None, - gateway_endpoint: str = None, microservice_endpoint: str = None, - method: str = None, shape: Tuple = (1, 1), namespace: str = None, - data: np.ndarray = None, - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - names: Iterable[str] = None, - gateway_prefix: str = None, headers: Dict = None, - http_path: str = None) -> Dict: + def explain( + self, + gateway: str = None, + transport: str = None, + deployment_name: str = None, + payload_type: str = None, + seldon_rest_endpoint: str = None, + seldon_grpc_endpoint: str = None, + gateway_endpoint: str = None, + microservice_endpoint: str = None, + method: str = None, + shape: Tuple = (1, 1), + namespace: str = None, + data: np.ndarray = None, + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + names: Iterable[str] = None, + gateway_prefix: str = None, + headers: Dict = None, + http_path: str = None, + ) -> Dict: """ Parameters @@ -412,15 +547,26 @@ def explain(self, gateway: str = None, transport: str = None, deployment_name: s ------- """ - k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name, - payload_type=payload_type, seldon_rest_endpoint=seldon_rest_endpoint, - seldon_grpc_endpoint=seldon_grpc_endpoint, - gateway_endpoint=gateway_endpoint, - microservice_endpoint=microservice_endpoint, method=method, - shape=shape, - 
namespace=namespace, names=names, - data=data, bin_data=bin_data, str_data=str_data, - gateway_prefix=gateway_prefix, headers=headers, http_path=http_path) + k = self._gather_args( + gateway=gateway, + transport=transport, + deployment_name=deployment_name, + payload_type=payload_type, + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + gateway_endpoint=gateway_endpoint, + microservice_endpoint=microservice_endpoint, + method=method, + shape=shape, + namespace=namespace, + names=names, + data=data, + bin_data=bin_data, + str_data=str_data, + gateway_prefix=gateway_prefix, + headers=headers, + http_path=http_path, + ) self._validate_args(**k) if k["gateway"] == "ambassador" or k["gateway"] == "istio": if k["transport"] == "rest": @@ -432,16 +578,28 @@ def explain(self, gateway: str = None, transport: str = None, deployment_name: s else: raise SeldonClientException("Unknown gateway " + k["gateway"]) - - - - def microservice(self, gateway: str = None, transport: str = None, deployment_name: str = None, - payload_type: str = None, oauth_key: str = None, oauth_secret: str = None, - seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None, - gateway_endpoint: str = None, microservice_endpoint: str = None, - method: str = None, shape: Tuple = (1, 1), namespace: str = None, data: np.ndarray = None, - datas: List[np.ndarray] = None, ndatas: int = None, bin_data: Union[bytes, bytearray] = None, - str_data: str = None, names: Iterable[str] = None) -> Union[SeldonClientPrediction, SeldonClientCombine]: + def microservice( + self, + gateway: str = None, + transport: str = None, + deployment_name: str = None, + payload_type: str = None, + oauth_key: str = None, + oauth_secret: str = None, + seldon_rest_endpoint: str = None, + seldon_grpc_endpoint: str = None, + gateway_endpoint: str = None, + microservice_endpoint: str = None, + method: str = None, + shape: Tuple = (1, 1), + namespace: str = None, + data: np.ndarray = None, + datas: List[np.ndarray] = None, + ndatas: int = None, + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + names: Iterable[str] = None, + ) -> Union[SeldonClientPrediction, SeldonClientCombine]: """ Parameters @@ -496,25 +654,47 @@ def microservice(self, gateway: str = None, transport: str = None, deployment_na A prediction result """ - k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name, - payload_type=payload_type, oauth_key=oauth_key, - oauth_secret=oauth_secret, seldon_rest_endpoint=seldon_rest_endpoint, - seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint, - microservice_endpoint=microservice_endpoint, method=method, shape=shape, - namespace=namespace, datas=datas, ndatas=ndatas, names=names, - data=data, bin_data=bin_data, str_data=str_data) + k = self._gather_args( + gateway=gateway, + transport=transport, + deployment_name=deployment_name, + payload_type=payload_type, + oauth_key=oauth_key, + oauth_secret=oauth_secret, + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + gateway_endpoint=gateway_endpoint, + microservice_endpoint=microservice_endpoint, + method=method, + shape=shape, + namespace=namespace, + datas=datas, + ndatas=ndatas, + names=names, + data=data, + bin_data=bin_data, + str_data=str_data, + ) self._validate_args(**k) if k["transport"] == "rest": - if k["method"] == "predict" or k["method"] == "transform-input" or k["method"] == "transform-output" or k[ - "method"] == "route": + if ( + 
k["method"] == "predict" + or k["method"] == "transform-input" + or k["method"] == "transform-output" + or k["method"] == "route" + ): return microservice_api_rest_seldon_message(**k) elif k["method"] == "aggregate": return microservice_api_rest_aggregate(**k) else: raise SeldonClientException("Unknown method " + k["method"]) elif k["transport"] == "grpc": - if k["method"] == "predict" or k["method"] == "transform-input" or k["method"] == "transform-output" or k[ - "method"] == "route": + if ( + k["method"] == "predict" + or k["method"] == "transform-input" + or k["method"] == "transform-output" + or k["method"] == "route" + ): return microservice_api_grpc_seldon_message(**k) elif k["method"] == "aggregate": return microservice_api_grpc_aggregate(**k) @@ -523,15 +703,25 @@ def microservice(self, gateway: str = None, transport: str = None, deployment_na else: raise SeldonClientException("Unknown transport " + k["transport"]) - def microservice_feedback(self, prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - gateway: str = None, transport: str = None, deployment_name: str = None, - payload_type: str = None, oauth_key: str = None, oauth_secret: str = None, - seldon_rest_endpoint: str = None, - seldon_grpc_endpoint: str = None, - gateway_endpoint: str = None, - microservice_endpoint: str = None, - method: str = None, shape: Tuple = (1, 1), namespace: str = None) -> SeldonClientFeedback: + def microservice_feedback( + self, + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + gateway: str = None, + transport: str = None, + deployment_name: str = None, + payload_type: str = None, + oauth_key: str = None, + oauth_secret: str = None, + seldon_rest_endpoint: str = None, + seldon_grpc_endpoint: str = None, + gateway_endpoint: str = None, + microservice_endpoint: str = None, + method: str = None, + shape: Tuple = (1, 1), + namespace: str = None, + ) -> SeldonClientFeedback: """ Parameters @@ -578,25 +768,43 @@ def microservice_feedback(self, prediction_request: prediction_pb2.SeldonMessage A client response """ - k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name, - payload_type=payload_type, oauth_key=oauth_key, oauth_secret=oauth_secret, - seldon_rest_endpoint=seldon_rest_endpoint - , seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint, - microservice_endpoint=microservice_endpoint, method=method, shape=shape, - namespace=namespace) + k = self._gather_args( + gateway=gateway, + transport=transport, + deployment_name=deployment_name, + payload_type=payload_type, + oauth_key=oauth_key, + oauth_secret=oauth_secret, + seldon_rest_endpoint=seldon_rest_endpoint, + seldon_grpc_endpoint=seldon_grpc_endpoint, + gateway_endpoint=gateway_endpoint, + microservice_endpoint=microservice_endpoint, + method=method, + shape=shape, + namespace=namespace, + ) self._validate_args(**k) if k["transport"] == "rest": - return microservice_api_rest_feedback(prediction_request, prediction_response, reward, **k) + return microservice_api_rest_feedback( + prediction_request, prediction_response, reward, **k + ) else: - return microservice_api_grpc_feedback(prediction_request, prediction_response, reward, **k) - - -def microservice_api_rest_seldon_message(method: str = "predict", microservice_endpoint: str = "localhost:5000", - shape: Tuple = (1, 1), - data: object = None, payload_type: 
str = "tensor", - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - names: Iterable[str] = None, - **kwargs) -> SeldonClientPrediction: + return microservice_api_grpc_feedback( + prediction_request, prediction_response, reward, **k + ) + + +def microservice_api_rest_seldon_message( + method: str = "predict", + microservice_endpoint: str = "localhost:5000", + shape: Tuple = (1, 1), + data: object = None, + payload_type: str = "tensor", + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientPrediction: """ Call Seldon microservice REST API @@ -646,7 +854,8 @@ def microservice_api_rest_seldon_message(method: str = "predict", microservice_e payload = seldon_message_to_json(request) response_raw = requests.post( "http://" + microservice_endpoint + "/" + method, - data={"json": json.dumps(payload)}) + data={"json": json.dumps(payload)}, + ) if response_raw.status_code == 200: success = True msg = "" @@ -660,11 +869,15 @@ def microservice_api_rest_seldon_message(method: str = "predict", microservice_e return SeldonClientPrediction(request, None, success, str(e)) -def microservice_api_rest_aggregate(microservice_endpoint: str = "localhost:5000", - shape: Tuple = (1, 1), - datas: List[np.ndarray] = None, ndatas: int = None, payload_type: str = "tensor", - names: Iterable[str] = None, - **kwargs) -> SeldonClientCombine: +def microservice_api_rest_aggregate( + microservice_endpoint: str = "localhost:5000", + shape: Tuple = (1, 1), + datas: List[np.ndarray] = None, + ndatas: int = None, + payload_type: str = "tensor", + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientCombine: """ Call Seldon microservice REST API aggregate endpoint @@ -707,7 +920,8 @@ def microservice_api_rest_aggregate(microservice_endpoint: str = "localhost:5000 payload = seldon_messages_to_json(request) response_raw = requests.post( "http://" + microservice_endpoint + "/aggregate", - data={"json": json.dumps(payload)}) + data={"json": json.dumps(payload)}, + ) if response_raw.status_code == 200: success = True msg = "" @@ -721,9 +935,13 @@ def microservice_api_rest_aggregate(microservice_endpoint: str = "localhost:5000 return SeldonClientCombine(request, None, success, str(e)) -def microservice_api_rest_feedback(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - microservice_endpoint: str = None, **kwargs) -> SeldonClientFeedback: +def microservice_api_rest_feedback( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + microservice_endpoint: str = None, + **kwargs, +) -> SeldonClientFeedback: """ Call Seldon microserice REST API to send feedback @@ -743,11 +961,14 @@ def microservice_api_rest_feedback(prediction_request: prediction_pb2.SeldonMess ------- A SeldonClientFeedback """ - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) payload = feedback_to_json(request) response_raw = requests.post( "http://" + microservice_endpoint + "/send-feedback", - data={"json": json.dumps(payload)}) + data={"json": json.dumps(payload)}, + ) if response_raw.status_code == 200: success = True msg = "" @@ -761,14 +982,19 @@ def microservice_api_rest_feedback(prediction_request: prediction_pb2.SeldonMess 
return SeldonClientFeedback(request, None, success, str(e)) -def microservice_api_grpc_seldon_message(method: str = "predict", microservice_endpoint: str = "localhost:5000", - shape: Tuple = (1, 1), - data: object = None, payload_type: str = "tensor", - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - names: Iterable[str] = None, - **kwargs) -> SeldonClientPrediction: +def microservice_api_grpc_seldon_message( + method: str = "predict", + microservice_endpoint: str = "localhost:5000", + shape: Tuple = (1, 1), + data: object = None, + payload_type: str = "tensor", + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientPrediction: """ Call Seldon microservice gRPC API @@ -809,9 +1035,13 @@ def microservice_api_grpc_seldon_message(method: str = "predict", microservice_e data = np.random.rand(*shape) datadef = array_to_grpc_datadef(payload_type, data, names=names) request = prediction_pb2.SeldonMessage(data=datadef) - channel = grpc.insecure_channel(microservice_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + channel = grpc.insecure_channel( + microservice_endpoint, + options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) try: if method == "predict": stub_model = prediction_pb2_grpc.ModelStub(channel) @@ -833,13 +1063,17 @@ def microservice_api_grpc_seldon_message(method: str = "predict", microservice_e return SeldonClientPrediction(request, None, False, str(e)) -def microservice_api_grpc_aggregate(microservice_endpoint: str = "localhost:5000", - shape: Tuple = (1, 1), - datas: List[np.ndarray] = None, ndatas: int = None, payload_type: str = "tensor", - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - names: Iterable[str] = None, - **kwargs) -> SeldonClientCombine: +def microservice_api_grpc_aggregate( + microservice_endpoint: str = "localhost:5000", + shape: Tuple = (1, 1), + datas: List[np.ndarray] = None, + ndatas: int = None, + payload_type: str = "tensor", + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientCombine: """ Call Seldon microservice gRPC API aggregate @@ -884,9 +1118,13 @@ def microservice_api_grpc_aggregate(microservice_endpoint: str = "localhost:5000 msgs.append(prediction_pb2.SeldonMessage(data=datadef)) request = prediction_pb2.SeldonMessageList(seldonMessages=msgs) try: - channel = grpc.insecure_channel(microservice_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + channel = grpc.insecure_channel( + microservice_endpoint, + options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) stub = prediction_pb2_grpc.GenericStub(channel) response = stub.Aggregate(request=request) return SeldonClientCombine(request, response, True, "") @@ -894,12 +1132,15 @@ def 
microservice_api_grpc_aggregate(microservice_endpoint: str = "localhost:5000 return SeldonClientCombine(request, None, False, str(e)) -def microservice_api_grpc_feedback(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - microservice_endpoint: str = None, - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - **kwargs) -> SeldonClientFeedback: +def microservice_api_grpc_feedback( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + microservice_endpoint: str = None, + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + **kwargs, +) -> SeldonClientFeedback: """ Call Seldon gRPC @@ -919,11 +1160,17 @@ def microservice_api_grpc_feedback(prediction_request: prediction_pb2.SeldonMess ------- """ - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) try: - channel = grpc.insecure_channel(microservice_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + channel = grpc.insecure_channel( + microservice_endpoint, + options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) stub = prediction_pb2_grpc.GenericStub(channel) response = stub.SendFeedback(request=request) return SeldonClientFeedback(request, response, True, "") @@ -935,8 +1182,13 @@ def microservice_api_grpc_feedback(prediction_request: prediction_pb2.SeldonMess # External API # -def get_token(oauth_key: str = "", oauth_secret: str = "", namespace: str = None, - endpoint: str = "localhost:8002") -> str: + +def get_token( + oauth_key: str = "", + oauth_secret: str = "", + namespace: str = None, + endpoint: str = "localhost:8002", +) -> str: """ Get an OAUTH key from the Seldon Gateway @@ -955,7 +1207,7 @@ def get_token(oauth_key: str = "", oauth_secret: str = "", namespace: str = None The OAUTH token """ - payload = {'grant_type': 'client_credentials'} + payload = {"grant_type": "client_credentials"} if namespace is None: key = oauth_key else: @@ -963,21 +1215,29 @@ def get_token(oauth_key: str = "", oauth_secret: str = "", namespace: str = None response = requests.post( "http://" + endpoint + "/oauth/token", auth=HTTPBasicAuth(key, oauth_secret), - data=payload) + data=payload, + ) if response.status_code == 200: token = response.json()["access_token"] return token else: - print("Failed to get token:"+response.text) + print("Failed to get token:" + response.text) raise SeldonClientException(response.text) -def rest_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str = None, - seldon_rest_endpoint: str = "localhost:8002", shape: Tuple = (1, 1), - data: object = None, payload_type: str = "tensor", - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - names: Iterable[str] = None, - **kwargs) -> SeldonClientPrediction: +def rest_predict_seldon_oauth( + oauth_key: str, + oauth_secret: str, + namespace: str = None, + seldon_rest_endpoint: str = "localhost:8002", + shape: Tuple = (1, 1), + data: object = None, + payload_type: str = "tensor", + bin_data: 
Union[bytes, bytearray] = None, + str_data: str = None, + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientPrediction: """ Call Seldon API Gateway using REST @@ -1020,12 +1280,13 @@ def rest_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str data = np.random.rand(*shape) datadef = array_to_grpc_datadef(payload_type, data, names=names) request = prediction_pb2.SeldonMessage(data=datadef) - headers = {'Authorization': 'Bearer ' + token} + headers = {"Authorization": "Bearer " + token} payload = seldon_message_to_json(request) response_raw = requests.post( "http://" + seldon_rest_endpoint + "/api/v0.1/predictions", headers=headers, - json=payload) + json=payload, + ) if response_raw.status_code == 200: success = True msg = "" @@ -1045,15 +1306,22 @@ def rest_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str return SeldonClientPrediction(request, None, False, str(e)) -def grpc_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str = None, - seldon_rest_endpoint: str = "localhost:8002", - seldon_grpc_endpoint: str = "localhost:8004", shape: Tuple[int, int] = (1, 1), - data: np.ndarray = None, payload_type: str = "tensor", - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - names: Iterable[str] = None, - **kwargs) -> SeldonClientPrediction: +def grpc_predict_seldon_oauth( + oauth_key: str, + oauth_secret: str, + namespace: str = None, + seldon_rest_endpoint: str = "localhost:8002", + seldon_grpc_endpoint: str = "localhost:8004", + shape: Tuple[int, int] = (1, 1), + data: np.ndarray = None, + payload_type: str = "tensor", + bin_data: Union[bytes, bytearray] = None, + str_data: str = None, + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + names: Iterable[str] = None, + **kwargs, +) -> SeldonClientPrediction: """ Call Seldon gRPC API Gateway endpoint @@ -1100,11 +1368,15 @@ def grpc_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str data = np.random.rand(*shape) datadef = array_to_grpc_datadef(payload_type, data, names=names) request = prediction_pb2.SeldonMessage(data=datadef) - channel = grpc.insecure_channel(seldon_grpc_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + channel = grpc.insecure_channel( + seldon_grpc_endpoint, + options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) stub = prediction_pb2_grpc.SeldonStub(channel) - metadata = [('oauth_token', token)] + metadata = [("oauth_token", token)] try: response = stub.Predict(request=request, metadata=metadata) return SeldonClientPrediction(request, response, True, "") @@ -1112,14 +1384,23 @@ def grpc_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str return SeldonClientPrediction(request, None, False, str(e)) -def rest_predict_gateway(deployment_name: str, namespace: str = None, gateway_endpoint: str = "localhost:8003", - shape: Tuple[int, int] = (1, 1), - data: np.ndarray = None, headers: Dict = None, gateway_prefix: str = None, - payload_type: str = "tensor", - bin_data: Union[bytes, bytearray] = None, str_data: str = None, - names: Iterable[str] = None, call_credentials: SeldonCallCredentials = None, - channel_credentials: 
SeldonChannelCredentials= None, http_path: str = None,
-                         **kwargs) -> SeldonClientPrediction:
+def rest_predict_gateway(
+    deployment_name: str,
+    namespace: str = None,
+    gateway_endpoint: str = "localhost:8003",
+    shape: Tuple[int, int] = (1, 1),
+    data: np.ndarray = None,
+    headers: Dict = None,
+    gateway_prefix: str = None,
+    payload_type: str = "tensor",
+    bin_data: Union[bytes, bytearray] = None,
+    str_data: str = None,
+    names: Iterable[str] = None,
+    call_credentials: SeldonCallCredentials = None,
+    channel_credentials: SeldonChannelCredentials = None,
+    http_path: str = None,
+    **kwargs,
+) -> SeldonClientPrediction:
     """
     REST request to Gateway Ingress
 
@@ -1182,15 +1463,46 @@ def rest_predict_gateway(deployment_name: str, namespace: str = None, gateway_en
         if not call_credentials.token is None:
             req_headers["X-Auth-Token"] = call_credentials.token
     if http_path is not None:
-        url = url = scheme+"://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + http_path
+        url = (
+            scheme
+            + "://"
+            + gateway_endpoint
+            + "/seldon/"
+            + namespace
+            + "/"
+            + deployment_name
+            + http_path
+        )
     else:
         if gateway_prefix is None:
             if namespace is None:
-                url = scheme + "://" + gateway_endpoint + "/seldon/" + deployment_name + "/api/v0.1/predictions"
+                url = (
+                    scheme
+                    + "://"
+                    + gateway_endpoint
+                    + "/seldon/"
+                    + deployment_name
+                    + "/api/v0.1/predictions"
+                )
             else:
-                url = scheme+"://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "/api/v0.1/predictions"
+                url = (
+                    scheme
+                    + "://"
+                    + gateway_endpoint
+                    + "/seldon/"
+                    + namespace
+                    + "/"
+                    + deployment_name
+                    + "/api/v0.1/predictions"
+                )
         else:
-            url = scheme+"://" + gateway_endpoint + gateway_prefix + "/api/v0.1/predictions"
+            url = (
+                scheme
+                + "://"
+                + gateway_endpoint
+                + gateway_prefix
+                + "/api/v0.1/predictions"
+            )
     verify = True
     cert = None
     if not channel_credentials is None:
@@ -1199,14 +1511,14 @@ def rest_predict_gateway(deployment_name: str, namespace: str = None, gateway_en
         else:
             verify = channel_credentials.verify
         if not channel_credentials.private_key_file is None:
-            cert = (channel_credentials.root_certificates_file, channel_credentials.private_key_file)
-    logger.debug("URL is "+url)
+            cert = (
+                channel_credentials.root_certificates_file,
+                channel_credentials.private_key_file,
+            )
+    logger.debug("URL is " + url)
     response_raw = requests.post(
-        url,
-        json=payload,
-        headers=req_headers,
-        verify=verify,
-        cert=cert)
+        url, json=payload, headers=req_headers, verify=verify, cert=cert
+    )
     if response_raw.status_code == 200:
         success = True
         msg = ""
@@ -1227,14 +1539,23 @@ def rest_predict_gateway(deployment_name: str, namespace: str = None, gateway_en
         return SeldonClientPrediction(request, None, False, str(e))
 
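For illustration only (a sketch outside this patch, with placeholder names): rest_predict_gateway is normally reached through SeldonClient, and with no gateway_prefix or http_path the POST above targets /seldon/<namespace>/<deployment>/api/v0.1/predictions.

    import numpy as np
    from seldon_core.seldon_client import SeldonClient

    # Placeholder deployment "mymodel" in namespace "seldon" behind a local
    # Ambassador gateway; this resolves to
    # http://localhost:8003/seldon/seldon/mymodel/api/v0.1/predictions
    sc = SeldonClient(
        gateway="ambassador",
        gateway_endpoint="localhost:8003",
        transport="rest",
        deployment_name="mymodel",
        namespace="seldon",
    )
    response = sc.predict(data=np.random.rand(1, 2), names=["a", "b"])
    print(response.success)
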
-def explain_predict_gateway(deployment_name: str, namespace: str = None, gateway_endpoint: str = "localhost:8003",
-                            shape: Tuple[int, int] = (1, 1),
-                            data: np.ndarray = None, headers: Dict = None, gateway_prefix: str = None,
-                            payload_type: str = "tensor",
-                            bin_data: Union[bytes, bytearray] = None, str_data: str = None,
-                            names: Iterable[str] = None, call_credentials: SeldonCallCredentials = None,
-                            channel_credentials: SeldonChannelCredentials= None, http_path: str = None,
-                            **kwargs) -> Dict:
+def explain_predict_gateway(
+    deployment_name: str,
+    namespace: str = None,
+    gateway_endpoint: str = "localhost:8003",
+    shape: Tuple[int, int] = (1, 1),
+    data: np.ndarray = None,
+    headers: Dict = None,
+    gateway_prefix: str = None,
+    payload_type: str = "tensor",
+    bin_data: Union[bytes, bytearray] = None,
+    str_data: str = None,
+    names: Iterable[str] = None,
+    call_credentials: SeldonCallCredentials = None,
+    channel_credentials: SeldonChannelCredentials = None,
+    http_path: str = None,
+    **kwargs,
+) -> Dict:
     """
     REST explain request to Gateway Ingress
 
@@ -1297,15 +1618,52 @@ def explain_predict_gateway(deployment_name: str, namespace: str = None, gateway
         if not call_credentials.token is None:
             req_headers["X-Auth-Token"] = call_credentials.token
     if http_path is not None:
-        url = url = scheme+"://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + http_path
+        url = (
+            scheme
+            + "://"
+            + gateway_endpoint
+            + "/seldon/"
+            + namespace
+            + "/"
+            + deployment_name
+            + http_path
+        )
     else:
         if gateway_prefix is None:
             if namespace is None:
-                url = scheme + "://" + gateway_endpoint + "/seldon/" + deployment_name + "-explainer/models/" + deployment_name+ ":explain"
+                url = (
+                    scheme
+                    + "://"
+                    + gateway_endpoint
+                    + "/seldon/"
+                    + deployment_name
+                    + "-explainer/models/"
+                    + deployment_name
+                    + ":explain"
+                )
             else:
-                url = scheme+"://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "-explainer/models/" + deployment_name+ ":explain"
+                url = (
+                    scheme
+                    + "://"
+                    + gateway_endpoint
+                    + "/seldon/"
+                    + namespace
+                    + "/"
+                    + deployment_name
+                    + "-explainer/models/"
+                    + deployment_name
+                    + ":explain"
+                )
         else:
-            url = scheme+"://" + gateway_endpoint + gateway_prefix + + "/models/" + deployment_name+ ":explain"
+            url = (
+                scheme
+                + "://"
+                + gateway_endpoint
+                + gateway_prefix
+                + "/models/"
+                + deployment_name
+                + ":explain"
+            )
     verify = True
     cert = None
     if not channel_credentials is None:
@@ -1314,29 +1672,37 @@ def explain_predict_gateway(deployment_name: str, namespace: str = None, gateway
         else:
             verify = channel_credentials.verify
         if not channel_credentials.private_key_file is None:
-            cert = (channel_credentials.root_certificates_file, channel_credentials.private_key_file)
-    logger.debug("URL is "+url)
+            cert = (
+                channel_credentials.root_certificates_file,
+                channel_credentials.private_key_file,
+            )
+    logger.debug("URL is " + url)
     response_raw = requests.post(
-        url,
-        json=payload,
-        headers=req_headers,
-        verify=verify,
-        cert=cert)
+        url, json=payload, headers=req_headers, verify=verify, cert=cert
+    )
     if response_raw.status_code == 200:
         return response_raw.json()
     else:
-        return {"success":False,"response_code":response_raw.status_code}
+        return {"success": False, "response_code": response_raw.status_code}
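For illustration only (a sketch outside this patch, placeholder names throughout): the namespaced branch above produces an explainer URL of the following shape.

    # Same concatenation as the namespaced branch of explain_predict_gateway;
    # "mymodel" and "seldon" are placeholders, not values from this patch.
    scheme, gateway_endpoint = "http", "localhost:8003"
    namespace, deployment_name = "seldon", "mymodel"
    url = (
        scheme + "://" + gateway_endpoint
        + "/seldon/" + namespace + "/" + deployment_name
        + "-explainer/models/" + deployment_name + ":explain"
    )
    assert url == (
        "http://localhost:8003/seldon/seldon/"
        "mymodel-explainer/models/mymodel:explain"
    )

-
-def grpc_predict_gateway(deployment_name: str, namespace: str = None, gateway_endpoint: str = "localhost:8003",
-                         shape: Tuple[int, int] = (1, 1),
-                         data: np.ndarray = None,
-                         headers: Dict = None, payload_type: str = "tensor",
-                         bin_data: Union[bytes, bytearray] = None, str_data: str = None,
-                         grpc_max_send_message_length: int = 4 * 1024 * 1024,
-                         grpc_max_receive_message_length: int = 4 * 1024 * 1024,
-                         names: Iterable[str] = None, call_credentials: SeldonCallCredentials = None,
-                         channel_credentials: SeldonChannelCredentials= None,
-                         **kwargs) -> SeldonClientPrediction:
+
+
+def grpc_predict_gateway(
+    deployment_name: str,
+    namespace: str = None,
+    gateway_endpoint: str = "localhost:8003",
+    shape: Tuple[int, int] = (1, 1),
+    data: np.ndarray = None,
+    headers: Dict = None,
+    payload_type: str = "tensor",
+    bin_data: Union[bytes, bytearray] = None,
+    str_data: str = None,
+    grpc_max_send_message_length: int = 4 * 1024 * 1024,
+    grpc_max_receive_message_length: int = 4 * 1024 * 1024,
+    names: 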
Iterable[str] = None, + call_credentials: SeldonCallCredentials = None, + channel_credentials: SeldonChannelCredentials = None, + **kwargs, +) -> SeldonClientPrediction: """ gRPC request to Gateway Ingress @@ -1387,44 +1753,62 @@ def grpc_predict_gateway(deployment_name: str, namespace: str = None, gateway_en datadef = array_to_grpc_datadef(payload_type, data, names=names) request = prediction_pb2.SeldonMessage(data=datadef) options = [ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)] + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ] if channel_credentials is None: channel = grpc.insecure_channel(gateway_endpoint, options) else: # If one of root cert & cert chain are provided, both must be provided # otherwise there is a null pointer exception in the Go underlying impl - if (channel_credentials.private_key_file - and channel_credentials.root_certificates_file - and channel_credentials.certificate_chain_file): + if ( + channel_credentials.private_key_file + and channel_credentials.root_certificates_file + and channel_credentials.certificate_chain_file + ): grpc_channel_credentials = grpc.ssl_channel_credentials( - root_certificates=open(channel_credentials.root_certificates_file, 'rb').read(), - private_key=open(channel_credentials.private_key_file, 'rb').read(), - certificate_chain=open(channel_credentials.certificate_chain_file, 'rb').read()) + root_certificates=open( + channel_credentials.root_certificates_file, "rb" + ).read(), + private_key=open(channel_credentials.private_key_file, "rb").read(), + certificate_chain=open( + channel_credentials.certificate_chain_file, "rb" + ).read(), + ) # For most usecases only providing the root cert file is enough elif channel_credentials.root_certificates_file: grpc_channel_credentials = grpc.ssl_channel_credentials( - root_certificates=open(channel_credentials.root_certificates_file, 'rb').read()) + root_certificates=open( + channel_credentials.root_certificates_file, "rb" + ).read() + ) # This piece also allows for blank SSL Channel credentials in case this is required else: grpc_channel_credentials = grpc.ssl_channel_credentials() if channel_credentials.verify == False: # If Verify is set to false then we add the SSL Target Name Override option - options += [('grpc.ssl_target_name_override', gateway_endpoint.split(":")[0])] + options += [ + ("grpc.ssl_target_name_override", gateway_endpoint.split(":")[0]) + ] if not call_credentials is None: grpc_call_credentials = grpc.metadata_call_credentials( - lambda context, callback: callback((("x-auth-token", call_credentials.token),), None)) - credentials = grpc.composite_channel_credentials(grpc_channel_credentials, grpc_call_credentials) + lambda context, callback: callback( + (("x-auth-token", call_credentials.token),), None + ) + ) + credentials = grpc.composite_channel_credentials( + grpc_channel_credentials, grpc_call_credentials + ) else: credentials = grpc_channel_credentials logger.debug(f"Sending GRPC Request to endpoint: {gateway_endpoint}") channel = grpc.secure_channel(gateway_endpoint, credentials, options) stub = prediction_pb2_grpc.SeldonStub(channel) if namespace is None: - metadata = [('seldon', deployment_name)] + metadata = [("seldon", deployment_name)] else: - metadata = [('seldon', deployment_name), ('namespace', namespace)] + metadata = [("seldon", deployment_name), ("namespace", namespace)] if not 
headers is None: for k in headers: metadata.append((k, headers[k])) @@ -1432,10 +1816,16 @@ def grpc_predict_gateway(deployment_name: str, namespace: str = None, gateway_en return SeldonClientPrediction(request, response, True, "") -def rest_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - oauth_key: str = "", oauth_secret: str = "", namespace: str = None, - seldon_rest_endpoint: str = "localhost:8002", **kwargs) -> SeldonClientFeedback: +def rest_feedback_seldon_oauth( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + oauth_key: str = "", + oauth_secret: str = "", + namespace: str = None, + seldon_rest_endpoint: str = "localhost:8002", + **kwargs, +) -> SeldonClientFeedback: """ Send Feedback to Seldon API Gateway using REST @@ -1462,13 +1852,16 @@ def rest_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage """ token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint) - headers = {'Authorization': 'Bearer ' + token} - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) + headers = {"Authorization": "Bearer " + token} + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) payload = feedback_to_json(request) response_raw = requests.post( "http://" + seldon_rest_endpoint + "/api/v0.1/feedback", headers=headers, - json=payload) + json=payload, + ) if response_raw.status_code == 200: success = True msg = "" @@ -1488,14 +1881,19 @@ def rest_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage return SeldonClientFeedback(request, None, False, str(e)) -def grpc_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - oauth_key: str = "", oauth_secret: str = "", namespace: str = None, - seldon_rest_endpoint: str = "localhost:8002", - seldon_grpc_endpoint: str = "localhost:8004", - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - **kwargs) -> SeldonClientFeedback: +def grpc_feedback_seldon_oauth( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + oauth_key: str = "", + oauth_secret: str = "", + namespace: str = None, + seldon_rest_endpoint: str = "localhost:8002", + seldon_grpc_endpoint: str = "localhost:8004", + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 1024, + **kwargs, +) -> SeldonClientFeedback: """ Send feedback to Seldon API gateway via gRPC @@ -1528,12 +1926,18 @@ def grpc_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage """ token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint) - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) - channel = grpc.insecure_channel(seldon_grpc_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) + channel = grpc.insecure_channel( + seldon_grpc_endpoint, + 
options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) stub = prediction_pb2_grpc.SeldonStub(channel) - metadata = [('oauth_token', token)] + metadata = [("oauth_token", token)] try: response = stub.SendFeedback(request=request, metadata=metadata) return SeldonClientFeedback(request, response, True, "") @@ -1541,11 +1945,17 @@ def grpc_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage return SeldonClientFeedback(request, None, False, str(e)) -def rest_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - deployment_name: str = "", namespace: str = None, - gateway_endpoint: str = "localhost:8003", headers: Dict = None, gateway_prefix: str = None, - **kwargs) -> SeldonClientFeedback: +def rest_feedback_gateway( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + deployment_name: str = "", + namespace: str = None, + gateway_endpoint: str = "localhost:8003", + headers: Dict = None, + gateway_prefix: str = None, + **kwargs, +) -> SeldonClientFeedback: """ Send Feedback to Seldon via gateway using REST @@ -1574,24 +1984,39 @@ def rest_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = Non A Seldon Feedback Response """ - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) payload = feedback_to_json(request) if gateway_prefix is None: if namespace is None: response_raw = requests.post( - "http://" + gateway_endpoint + "/seldon/" + deployment_name + "/api/v0.1/feedback", + "http://" + + gateway_endpoint + + "/seldon/" + + deployment_name + + "/api/v0.1/feedback", json=payload, - headers=headers) + headers=headers, + ) else: response_raw = requests.post( - "http://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "/api/v0.1/feedback", + "http://" + + gateway_endpoint + + "/seldon/" + + namespace + + "/" + + deployment_name + + "/api/v0.1/feedback", json=payload, - headers=headers) + headers=headers, + ) else: response_raw = requests.post( "http://" + gateway_endpoint + gateway_prefix + "/api/v0.1/feedback", json=payload, - headers=headers) + headers=headers, + ) if response_raw.status_code == 200: success = True @@ -1612,14 +2037,18 @@ def rest_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = Non return SeldonClientFeedback(request, None, False, str(e)) -def grpc_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = None, - prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0, - deployment_name: str = "", namespace: str = None, - gateway_endpoint: str = "localhost:8003", - headers: Dict = None, - grpc_max_send_message_length: int = 4 * 1024 * 1024, - grpc_max_receive_message_length: int = 4 * 1024 * 1024, - **kwargs) -> SeldonClientFeedback: +def grpc_feedback_gateway( + prediction_request: prediction_pb2.SeldonMessage = None, + prediction_response: prediction_pb2.SeldonMessage = None, + reward: float = 0, + deployment_name: str = "", + namespace: str = None, + gateway_endpoint: str = "localhost:8003", + headers: Dict = None, + grpc_max_send_message_length: int = 4 * 1024 * 1024, + grpc_max_receive_message_length: int = 4 * 1024 * 
1024, + **kwargs, +) -> SeldonClientFeedback: """ Parameters @@ -1648,15 +2077,21 @@ def grpc_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = Non ------- """ - request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward) - channel = grpc.insecure_channel(gateway_endpoint, options=[ - ('grpc.max_send_message_length', grpc_max_send_message_length), - ('grpc.max_receive_message_length', grpc_max_receive_message_length)]) + request = prediction_pb2.Feedback( + request=prediction_request, response=prediction_response, reward=reward + ) + channel = grpc.insecure_channel( + gateway_endpoint, + options=[ + ("grpc.max_send_message_length", grpc_max_send_message_length), + ("grpc.max_receive_message_length", grpc_max_receive_message_length), + ], + ) stub = prediction_pb2_grpc.SeldonStub(channel) if namespace is None: - metadata = [('seldon', deployment_name)] + metadata = [("seldon", deployment_name)] else: - metadata = [('seldon', deployment_name), ('namespace', namespace)] + metadata = [("seldon", deployment_name), ("namespace", namespace)] if not headers is None: for k in headers: metadata.append((k, headers[k])) diff --git a/python/seldon_core/seldon_methods.py b/python/seldon_core/seldon_methods.py index b3e11aa4ca..44f0071afb 100644 --- a/python/seldon_core/seldon_methods.py +++ b/python/seldon_core/seldon_methods.py @@ -1,10 +1,21 @@ import logging -from seldon_core.utils import extract_request_parts, construct_response, \ - json_to_seldon_message, construct_response_json, \ - extract_request_parts_json, extract_feedback_request_parts -from seldon_core.user_model import client_predict, client_aggregate, \ - client_route, client_transform_output, client_transform_input, \ - client_send_feedback, SeldonNotImplementedError +from seldon_core.utils import ( + extract_request_parts, + construct_response, + json_to_seldon_message, + construct_response_json, + extract_request_parts_json, + extract_feedback_request_parts, +) +from seldon_core.user_model import ( + client_predict, + client_aggregate, + client_route, + client_transform_output, + client_transform_input, + client_send_feedback, + SeldonNotImplementedError, +) from seldon_core.flask_utils import SeldonMicroserviceException from google.protobuf import json_format from seldon_core.proto import prediction_pb2 @@ -15,9 +26,8 @@ def predict( - user_model: Any, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + user_model: Any, request: Union[prediction_pb2.SeldonMessage, List, Dict] +) -> Union[prediction_pb2.SeldonMessage, List, Dict]: """ Call the user model to get a prediction and package the response @@ -48,19 +58,22 @@ def predict( if is_proto: (features, meta, datadef, data_type) = extract_request_parts(request) - client_response = client_predict(user_model, features, datadef.names, meta=meta) + client_response = client_predict( + user_model, features, datadef.names, meta=meta + ) return construct_response(user_model, False, request, client_response) else: (features, meta, datadef, data_type) = extract_request_parts_json(request) class_names = datadef["names"] if datadef and "names" in datadef else [] - client_response = client_predict(user_model, features, class_names, meta=meta) + client_response = client_predict( + user_model, features, class_names, meta=meta + ) return construct_response_json(user_model, False, request, client_response) + def send_feedback( - user_model: Any, - request: 
prediction_pb2.Feedback,
-    predictive_unit_id: str) \
-    -> prediction_pb2.SeldonMessage:
+    user_model: Any, request: prediction_pb2.Feedback, predictive_unit_id: str
+) -> prediction_pb2.SeldonMessage:
     """
 
     Parameters
     ----------
@@ -92,9 +105,13 @@ def send_feedback(
     except SeldonNotImplementedError:
         pass
 
-    (datadef_request, features, truth, reward) = extract_feedback_request_parts(request)
+    (datadef_request, features, truth, reward) = extract_feedback_request_parts(
+        request
+    )
     routing = request.response.meta.routing.get(predictive_unit_id)
-    client_response = client_send_feedback(user_model, features, datadef_request.names, reward, truth, routing)
+    client_response = client_send_feedback(
+        user_model, features, datadef_request.names, reward, truth, routing
+    )
 
     if client_response is None:
         client_response = np.array([])
@@ -104,9 +121,8 @@ def send_feedback(
 
 def transform_input(
-    user_model: Any,
-    request: Union[prediction_pb2.SeldonMessage, List, Dict]) \
-    -> Union[prediction_pb2.SeldonMessage, List, Dict]:
+    user_model: Any, request: Union[prediction_pb2.SeldonMessage, List, Dict]
+) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
     """
 
     Parameters
     ----------
@@ -124,10 +140,14 @@ def transform_input(
     is_proto = isinstance(request, prediction_pb2.SeldonMessage)
 
     if hasattr(user_model, "transform_input_rest"):
-        logger.warning("transform_input_rest is deprecated. Please use transform_input_raw")
+        logger.warning(
+            "transform_input_rest is deprecated. Please use transform_input_raw"
+        )
         return user_model.transform_input_rest(request)
     elif hasattr(user_model, "transform_input_grpc"):
-        logger.warning("transform_input_grpc is deprecated. Please use transform_input_raw")
+        logger.warning(
+            "transform_input_grpc is deprecated. Please use transform_input_raw"
+        )
         return user_model.transform_input_grpc(request)
     else:
         if hasattr(user_model, "transform_input_raw"):
@@ -139,19 +159,21 @@ def transform_input(
     if is_proto:
         (features, meta, datadef, data_type) = extract_request_parts(request)
         client_response = client_transform_input(
-            user_model, features, datadef.names, meta=meta)
+            user_model, features, datadef.names, meta=meta
+        )
         return construct_response(user_model, False, request, client_response)
     else:
         (features, meta, datadef, data_type) = extract_request_parts_json(request)
         class_names = datadef["names"] if datadef and "names" in datadef else []
         client_response = client_transform_input(
-            user_model, features, class_names, meta=meta)
+            user_model, features, class_names, meta=meta
+        )
         return construct_response_json(user_model, False, request, client_response)
 
+
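For illustration only (a sketch outside this patch): a minimal component that the transform_input dispatch above would invoke through client_transform_input; the scaling factor is an arbitrary stand-in.

    import numpy as np

    class MyTransformer:
        # Hook signature matches the non-raw path dispatched above;
        # dividing by 255.0 is only a placeholder transformation.
        def transform_input(self, X, names, meta=None):
            return np.asarray(X) / 255.0
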
 def transform_output(
-    user_model: Any,
-    request: Union[prediction_pb2.SeldonMessage, List, Dict]) \
-    -> Union[prediction_pb2.SeldonMessage, List, Dict]:
+    user_model: Any, request: Union[prediction_pb2.SeldonMessage, List, Dict]
+) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
     """
 
     Parameters
     ----------
@@ -169,10 +191,14 @@ def transform_output(
     is_proto = isinstance(request, prediction_pb2.SeldonMessage)
 
     if hasattr(user_model, "transform_output_rest"):
-        logger.warning("transform_input_rest is deprecated. Please use transform_input_raw")
+        logger.warning(
+            "transform_output_rest is deprecated. Please use transform_output_raw"
+        )
         return user_model.transform_output_rest(request)
     elif hasattr(user_model, "transform_output_grpc"):
-        logger.warning("transform_input_grpc is deprecated. Please use transform_input_raw")
+        logger.warning(
+            "transform_output_grpc is deprecated. Please use transform_output_raw"
+        )
         return user_model.transform_output_grpc(request)
     else:
         if hasattr(user_model, "transform_output_raw"):
@@ -184,19 +210,21 @@ def transform_output(
     if is_proto:
         (features, meta, datadef, data_type) = extract_request_parts(request)
         client_response = client_transform_output(
-            user_model, features, datadef.names, meta=meta)
+            user_model, features, datadef.names, meta=meta
+        )
         return construct_response(user_model, False, request, client_response)
     else:
         (features, meta, datadef, data_type) = extract_request_parts_json(request)
         class_names = datadef["names"] if datadef and "names" in datadef else []
         client_response = client_transform_output(
-            user_model, features, class_names, meta=meta)
+            user_model, features, class_names, meta=meta
+        )
         return construct_response_json(user_model, False, request, client_response)
 
+
 def route(
-    user_model: Any,
-    request: Union[prediction_pb2.SeldonMessage, List, Dict]) \
-    -> Union[prediction_pb2.SeldonMessage, List, Dict]:
+    user_model: Any, request: Union[prediction_pb2.SeldonMessage, List, Dict]
+) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
     """
 
     Parameters
     ----------
@@ -226,23 +254,30 @@ def route(
     if is_proto:
         (features, meta, datadef, data_type) = extract_request_parts(request)
-        client_response = client_route(
-            user_model, features, datadef.names)
+        client_response = client_route(user_model, features, datadef.names)
         if not isinstance(client_response, int):
-            raise SeldonMicroserviceException("Routing response must be int but got " + str(client_response))
+            raise SeldonMicroserviceException(
+                "Routing response must be int but got " + str(client_response)
+            )
         client_response_arr = np.array([[client_response]])
         return construct_response(user_model, False, request, client_response_arr)
     else:
         (features, meta, datadef, data_type) = extract_request_parts_json(request)
         class_names = datadef["names"] if datadef and "names" in datadef else []
-        client_response = client_route(
-            user_model, features, class_names)
+        client_response = client_route(user_model, features, class_names)
         if not isinstance(client_response, int):
-            raise SeldonMicroserviceException("Routing response must be int but got " + str(client_response))
+            raise SeldonMicroserviceException(
+                "Routing response must be int but got " + str(client_response)
+            )
         client_response_arr = np.array([[client_response]])
-        return construct_response_json(user_model, False, request, client_response_arr)
+        return construct_response_json(
+            user_model, False, request, client_response_arr
+        )
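For illustration only (a sketch outside this patch): route must return a plain int, as enforced above; the simplest possible router pins all traffic to the first child.

    class MyRouter:
        # Anything other than an int returned here would trigger the
        # SeldonMicroserviceException raised by the dispatcher above.
        def route(self, features, feature_names):
            return 0

-def aggregate(user_model: Any, request: prediction_pb2.SeldonMessageList) -> prediction_pb2.SeldonMessage:
+
+def aggregate(
+    user_model: Any, request: prediction_pb2.SeldonMessageList
+) -> prediction_pb2.SeldonMessage:
     """
     Aggregate a list of payloads
 
@@ -283,14 +318,19 @@ def aggregate(user_model: Any, request: prediction_pb2.SeldonMessageList) -> pre
             names_list.append(datadef.names)
 
         client_response = client_aggregate(user_model, features_list, names_list)
-        return construct_response(user_model, False, request.seldonMessages[0], client_response)
+        return construct_response(
+            user_model, False, request.seldonMessages[0], client_response
+        )
     else:
         features_list = []
         names_list = []
 
-        if "seldonMessages" not in request or \
-                not isinstance(request["seldonMessages"], list):
-            raise SeldonMicroserviceException(f"Invalid request data type: {request}")
+        if "seldonMessages" not in request or not isinstance(
+            request["seldonMessages"], list
+        ):
+            raise SeldonMicroserviceException(
+                f"Invalid 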
request data type: {request}" + ) for msg in request["seldonMessages"]: (features, meta, datadef, data_type) = extract_request_parts_json(msg) @@ -299,5 +339,6 @@ def aggregate(user_model: Any, request: prediction_pb2.SeldonMessageList) -> pre names_list.append(class_names) client_response = client_aggregate(user_model, features_list, names_list) - return construct_response_json(user_model, False, request["seldonMessages"][0], client_response) - + return construct_response_json( + user_model, False, request["seldonMessages"][0], client_response + ) diff --git a/python/seldon_core/serving_test_gen.py b/python/seldon_core/serving_test_gen.py index 08800d7950..50deef54c2 100644 --- a/python/seldon_core/serving_test_gen.py +++ b/python/seldon_core/serving_test_gen.py @@ -58,7 +58,9 @@ def _column_values(column: pd.Series) -> Union[List, float]: return np.NaN -def create_seldon_api_testing_file(data: pd.DataFrame, target: str, output_path: str) -> bool: +def create_seldon_api_testing_file( + data: pd.DataFrame, target: str, output_path: str +) -> bool: """ Create a JSON file for Seldon API testing. @@ -78,9 +80,14 @@ def create_seldon_api_testing_file(data: pd.DataFrame, target: str, output_path: # create a Data frame in the form of JSON object df_for_json = pd.DataFrame(data=data.columns.values, columns=["name"]) - df_for_json["dtype"] = np.where(data.dtypes == np.float, 'FLOAT', - np.where(data.dtypes == np.int, 'INTEGER', np.NaN)) - df_for_json["ftype"] = np.where(data.dtypes == np.number, 'continuous', 'categorical') + df_for_json["dtype"] = np.where( + data.dtypes == np.float, + "FLOAT", + np.where(data.dtypes == np.int, "INTEGER", np.NaN), + ) + df_for_json["ftype"] = np.where( + data.dtypes == np.number, "continuous", "categorical" + ) ranges = [_column_range(data[column_name]) for column_name in data.columns.values] values = [_column_values(data[column_name]) for column_name in data.columns.values] df_for_json["range"] = ranges @@ -90,12 +97,16 @@ def create_seldon_api_testing_file(data: pd.DataFrame, target: str, output_path: df_for_json_features = df_for_json[df_for_json.name != target] # Convert data frames to JSON with a trick that removes records with NaNs - json_features_df = df_for_json_features.T.apply(lambda row: row[~row.isnull()].to_json()) + json_features_df = df_for_json_features.T.apply( + lambda row: row[~row.isnull()].to_json() + ) json_features = f'[{",".join(json_features_df)}]' - json_target_df = df_for_json_target.T.apply(lambda row: row[~row.isnull()].to_json()) + json_target_df = df_for_json_target.T.apply( + lambda row: row[~row.isnull()].to_json() + ) json_target = f'[{",".join(json_target_df)}]' json_combined = f'{{"features": {json_features}, "targets": {json_target}}}' - with open(output_path, 'w+') as output_file: + with open(output_path, "w+") as output_file: output_file.write(str(json_combined)) return os.path.exists(output_path) diff --git a/python/seldon_core/storage.py b/python/seldon_core/storage.py index 3a2351db69..3d6470066c 100644 --- a/python/seldon_core/storage.py +++ b/python/seldon_core/storage.py @@ -33,7 +33,7 @@ _LOCAL_PREFIX = "file://" -class Storage(object): # pylint: disable=too-few-public-methods +class Storage(object): # pylint: disable=too-few-public-methods @staticmethod def download(uri: str, out_dir: str = None) -> str: logging.info("Copying contents of %s to local", uri) @@ -57,9 +57,12 @@ def download(uri: str, out_dir: str = None) -> str: elif is_local: return Storage._download_local(uri, out_dir) else: - raise Exception("Cannot 
recognize storage type for " + uri +
-                            "\n'%s', '%s', and '%s' are the current available storage type." %
-                            (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
+            raise Exception(
+                "Cannot recognize storage type for "
+                + uri
+                + "\n'%s', '%s', and '%s' are the currently available storage types."
+                % (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX)
+            )
         logging.info("Successfully copied %s to %s", uri, out_dir)
         return out_dir
@@ -78,8 +81,11 @@ def _download_s3(uri, temp_dir: str):
             if not obj.is_dir:
                 if subdir_object_key == "":
                     subdir_object_key = obj.object_name
-                client.fget_object(bucket_name, obj.object_name,
-                                   os.path.join(temp_dir, subdir_object_key))
+                client.fget_object(
+                    bucket_name,
+                    obj.object_name,
+                    os.path.join(temp_dir, subdir_object_key),
+                )
 
     @staticmethod
     def _download_gcs(uri, temp_dir: str):
@@ -101,7 +107,9 @@ def _download_gcs(uri, temp_dir: str):
 
             # Create necessary subdirectory to store the object locally
             if "/" in subdir_object_key:
-                local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
+                local_object_dir = os.path.join(
+                    temp_dir, subdir_object_key.rsplit("/", 1)[0]
+                )
                 if not os.path.isdir(local_object_dir):
                     os.makedirs(local_object_dir, exist_ok=True)
             if subdir_object_key.strip() != "":
@@ -116,7 +124,11 @@ def _download_blob(uri, out_dir: str):
         storage_url = match.group(2)
         container_name, prefix = storage_url.split("/", 1)
 
-        logging.info("Connecting to BLOB account: %s, contianer: %s", account_name, container_name)
+        logging.info(
+            "Connecting to BLOB account: %s, container: %s",
+            account_name,
+            container_name,
+        )
         block_blob_service = BlockBlobService(account_name=account_name)
         blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
 
@@ -156,9 +168,13 @@ def _download_local(uri, out_dir=None):
 
 def _create_minio_client():
     # Remove possible http scheme for Minio
     url = urlparse(os.getenv("S3_ENDPOINT", ""))
-    use_ssl = url.scheme=='https' if url.scheme else bool(os.getenv("USE_SSL", True))
-    minioClient = Minio(url.netloc,
-                        access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
-                        secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
-                        secure=use_ssl)
+    use_ssl = (
+        url.scheme == "https" if url.scheme else bool(os.getenv("USE_SSL", True))
+    )
+    minioClient = Minio(
+        url.netloc,
+        access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
+        secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
+        secure=use_ssl,
+    )
     return minioClient
diff --git a/python/seldon_core/tf_helper.py b/python/seldon_core/tf_helper.py
index 651202d936..087ee47de8 100644
--- a/python/seldon_core/tf_helper.py
+++ b/python/seldon_core/tf_helper.py
@@ -3,6 +3,7 @@
 
 try:
     import tensorflow  # noqa: F401
+
     _TF_MISSING = False
 except ImportError:
     _TF_MISSING = True
diff --git a/python/seldon_core/user_model.py b/python/seldon_core/user_model.py
index 26eb44639e..b7c9a5e74b 100644
--- a/python/seldon_core/user_model.py
+++ b/python/seldon_core/user_model.py
@@ -9,14 +9,15 @@
 
 logger = logging.getLogger(__name__)
 
+
 class SeldonNotImplementedError(SeldonMicroserviceException):
     status_code = 400
 
     def __init__(self, message):
         SeldonMicroserviceException.__init__(self, message)
 
-class SeldonComponent(object):
 
+class SeldonComponent(object):
     def __init__(self, **kwargs):
         pass
 
@@ -29,28 +30,39 @@ def class_names(self) -> Iterable[str]:
     def load(self):
         pass
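For illustration only (a sketch outside this patch, assuming the user's module does `from seldon_core.user_model import SeldonComponent`): user code subclasses SeldonComponent and overrides only the hooks it needs; the remaining methods below keep raising SeldonNotImplementedError by default.

    class MyModel(SeldonComponent):
        # Identity predict, purely illustrative; names and meta are ignored.
        def predict(self, X, names, meta=None):
            return X

 
-    def predict(self, X: np.ndarray, names: Iterable[str], meta: Dict = None) -> Union[
-        np.ndarray, List, str, bytes]:
+    def predict(
+        self, X: np.ndarray, names: Iterable[str], meta: Dict = None
+    ) -> Union[np.ndarray, List, str, bytes]:
        raise 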
SeldonNotImplementedError("predict is not implemented") - def predict_raw(self, msg: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage: + def predict_raw( + self, msg: prediction_pb2.SeldonMessage + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("predict_raw is not implemented") - def send_feedback_raw(self, feedback: prediction_pb2.Feedback) -> prediction_pb2.SeldonMessage: + def send_feedback_raw( + self, feedback: prediction_pb2.Feedback + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("send_feedback_raw is not implemented") - def transform_input(self, X: np.ndarray, names: Iterable[str], meta: Dict = None) -> Union[ - np.ndarray, List, str, bytes]: + def transform_input( + self, X: np.ndarray, names: Iterable[str], meta: Dict = None + ) -> Union[np.ndarray, List, str, bytes]: raise SeldonNotImplementedError("transform_input is not implemented") - def transform_input_raw(self, msg: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage: + def transform_input_raw( + self, msg: prediction_pb2.SeldonMessage + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("transform_input_raw is not implemented") - def transform_output(self, X: np.ndarray, names: Iterable[str], meta: Dict = None) -> Union[ - np.ndarray, List, str, bytes]: + def transform_output( + self, X: np.ndarray, names: Iterable[str], meta: Dict = None + ) -> Union[np.ndarray, List, str, bytes]: raise SeldonNotImplementedError("transform_output is not implemented") - def transform_output_raw(self, msg: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage: + def transform_output_raw( + self, msg: prediction_pb2.SeldonMessage + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("transform_output_raw is not implemented") def metrics(self) -> List[Dict]: @@ -59,22 +71,36 @@ def metrics(self) -> List[Dict]: def feature_names(self) -> Iterable[str]: raise SeldonNotImplementedError("feature_names is not implemented") - def send_feedback(self, features: Union[np.ndarray, str, bytes], feature_names: Iterable[str], reward: float, - truth: Union[np.ndarray, str, bytes], routing: Union[int, None]) -> Union[ - np.ndarray, List, str, bytes, None]: + def send_feedback( + self, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], + reward: float, + truth: Union[np.ndarray, str, bytes], + routing: Union[int, None], + ) -> Union[np.ndarray, List, str, bytes, None]: raise SeldonNotImplementedError("send_feedback is not implemented") - def route(self, features: Union[np.ndarray, str, bytes], feature_names: Iterable[str]) -> int: + def route( + self, features: Union[np.ndarray, str, bytes], feature_names: Iterable[str] + ) -> int: raise SeldonNotImplementedError("route is not implemented") - def route_raw(self, msg: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage: + def route_raw( + self, msg: prediction_pb2.SeldonMessage + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("route_raw is not implemented") - def aggregate(self, features_list: List[Union[np.ndarray, str, bytes]], feature_names_list: List) -> Union[ - np.ndarray, List, str, bytes]: + def aggregate( + self, + features_list: List[Union[np.ndarray, str, bytes]], + feature_names_list: List, + ) -> Union[np.ndarray, List, str, bytes]: raise SeldonNotImplementedError("aggregate is not implemented") - def aggregate_raw(self, msgs: prediction_pb2.SeldonMessageList) -> prediction_pb2.SeldonMessage: + def aggregate_raw( + self, msgs: 
prediction_pb2.SeldonMessageList + ) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("aggregate_raw is not implemented") @@ -100,7 +126,9 @@ def client_custom_tags(user_model: SeldonComponent) -> Dict: return {} -def client_class_names(user_model: SeldonComponent, predictions: np.ndarray) -> Iterable[str]: +def client_class_names( + user_model: SeldonComponent, predictions: np.ndarray +) -> Iterable[str]: """ Get class names from user model @@ -116,13 +144,15 @@ def client_class_names(user_model: SeldonComponent, predictions: np.ndarray) -> """ if len(predictions.shape) > 1: if hasattr(user_model, "class_names"): - if inspect.ismethod(getattr(user_model, 'class_names')): + if inspect.ismethod(getattr(user_model, "class_names")): try: return user_model.class_names() except SeldonNotImplementedError: pass else: - logger.info("class_names attribute is deprecated. Please define a class_names method") + logger.info( + "class_names attribute is deprecated. Please define a class_names method" + ) return user_model.class_names logger.info("class_names is not implemented") n_targets = predictions.shape[1] @@ -131,8 +161,12 @@ def client_class_names(user_model: SeldonComponent, predictions: np.ndarray) -> return [] -def client_predict(user_model: SeldonComponent, features: Union[np.ndarray, str, bytes], feature_names: Iterable[str], - **kwargs: Dict) -> Union[np.ndarray, List, str, bytes]: +def client_predict( + user_model: SeldonComponent, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], + **kwargs: Dict +) -> Union[np.ndarray, List, str, bytes]: """ Get prediction from user model @@ -162,8 +196,12 @@ def client_predict(user_model: SeldonComponent, features: Union[np.ndarray, str, return [] -def client_transform_input(user_model: SeldonComponent, features: Union[np.ndarray, str, bytes], - feature_names: Iterable[str], **kwargs: Dict) -> Union[np.ndarray, List, str, bytes]: +def client_transform_input( + user_model: SeldonComponent, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], + **kwargs: Dict +) -> Union[np.ndarray, List, str, bytes]: """ Transform data with user model @@ -195,8 +233,12 @@ def client_transform_input(user_model: SeldonComponent, features: Union[np.ndarr return features -def client_transform_output(user_model: SeldonComponent, features: Union[np.ndarray, str, bytes], - feature_names: Iterable[str], **kwargs: Dict) -> Union[np.ndarray, List, str, bytes]: +def client_transform_output( + user_model: SeldonComponent, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], + **kwargs: Dict +) -> Union[np.ndarray, List, str, bytes]: """ Transform output @@ -247,7 +289,9 @@ def client_custom_metrics(user_model: SeldonComponent) -> List[Dict]: if not validate_metrics(metrics): j_str = json.dumps(metrics) raise SeldonMicroserviceException( - "Bad metric created during request: " + j_str, reason="MICROSERVICE_BAD_METRIC") + "Bad metric created during request: " + j_str, + reason="MICROSERVICE_BAD_METRIC", + ) return metrics except SeldonNotImplementedError: pass @@ -255,7 +299,9 @@ def client_custom_metrics(user_model: SeldonComponent) -> List[Dict]: return [] -def client_feature_names(user_model: SeldonComponent, original: Iterable[str]) -> Iterable[str]: +def client_feature_names( + user_model: SeldonComponent, original: Iterable[str] +) -> Iterable[str]: """ Get feature names for user model @@ -278,10 +324,14 @@ def client_feature_names(user_model: SeldonComponent, original: Iterable[str]) - return 
original -def client_send_feedback(user_model: SeldonComponent, features: Union[np.ndarray, str, bytes], - feature_names: Iterable[str], - reward: float, truth: Union[np.ndarray, str, bytes], routing: Union[int, None]) \ - -> Union[np.ndarray, List, str, bytes, None]: +def client_send_feedback( + user_model: SeldonComponent, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], + reward: float, + truth: Union[np.ndarray, str, bytes], + routing: Union[int, None], +) -> Union[np.ndarray, List, str, bytes, None]: """ Feedback to user model @@ -307,15 +357,20 @@ def client_send_feedback(user_model: SeldonComponent, features: Union[np.ndarray """ if hasattr(user_model, "send_feedback"): try: - return user_model.send_feedback(features, feature_names, reward, truth, routing=routing) + return user_model.send_feedback( + features, feature_names, reward, truth, routing=routing + ) except SeldonNotImplementedError: pass logger.info("send_feedback is not implemented") return None -def client_route(user_model: SeldonComponent, features: Union[np.ndarray, str, bytes], - feature_names: Iterable[str]) -> int: +def client_route( + user_model: SeldonComponent, + features: Union[np.ndarray, str, bytes], + feature_names: Iterable[str], +) -> int: """ Get routing from user model @@ -338,8 +393,11 @@ def client_route(user_model: SeldonComponent, features: Union[np.ndarray, str, b raise SeldonNotImplementedError("Route not defined") -def client_aggregate(user_model: SeldonComponent, features_list: List[Union[np.ndarray, str, bytes]], - feature_names_list: List) -> Union[np.ndarray, List, str, bytes]: +def client_aggregate( + user_model: SeldonComponent, + features_list: List[Union[np.ndarray, str, bytes]], + feature_names_list: List, +) -> Union[np.ndarray, List, str, bytes]: """ Aggregate payloads diff --git a/python/seldon_core/utils.py b/python/seldon_core/utils.py index 533f8cdaf7..4f27b8dcd4 100644 --- a/python/seldon_core/utils.py +++ b/python/seldon_core/utils.py @@ -10,8 +10,13 @@ from seldon_core.proto import prediction_pb2 from seldon_core.flask_utils import SeldonMicroserviceException -from seldon_core.user_model import client_class_names, client_custom_metrics, client_custom_tags, client_feature_names, \ - SeldonComponent +from seldon_core.user_model import ( + client_class_names, + client_custom_metrics, + client_custom_tags, + client_feature_names, + SeldonComponent, +) from seldon_core.tf_helper import _TF_MISSING from typing import Tuple, Dict, Union, List, Optional, Iterable @@ -21,7 +26,9 @@ from tensorflow.core.framework.tensor_pb2 import TensorProto -def json_to_seldon_message(message_json: Union[List, Dict]) -> prediction_pb2.SeldonMessage: +def json_to_seldon_message( + message_json: Union[List, Dict] +) -> prediction_pb2.SeldonMessage: """ Parses JSON input to a SeldonMessage proto @@ -124,7 +131,9 @@ def feedback_to_json(message_proto: prediction_pb2.Feedback) -> Dict: return message_dict -def get_data_from_proto(request: prediction_pb2.SeldonMessage) -> Union[np.ndarray, str, bytes, dict]: +def get_data_from_proto( + request: prediction_pb2.SeldonMessage +) -> Union[np.ndarray, str, bytes, dict]: """ Extract the data payload from the SeldonMessage @@ -151,6 +160,7 @@ def get_data_from_proto(request: prediction_pb2.SeldonMessage) -> Union[np.ndarr else: raise SeldonMicroserviceException("Unknown data in SeldonMessage") + def grpc_datadef_to_array(datadef: prediction_pb2.DefaultData) -> np.ndarray: """ Convert a SeldonMessage DefaultData to a numpy array. 
@@ -171,13 +181,13 @@ def grpc_datadef_to_array(datadef: prediction_pb2.DefaultData) -> np.ndarray: sz = np.prod(datadef.tensor.shape) # get number of float64 entries c = datadef.tensor.SerializeToString() # get bytes # create array from packed entries which are at end of bytes - assumes same endianness - features = np.frombuffer(memoryview( - c[-(sz * 8):]), dtype=np.float64, count=sz, offset=0) + features = np.frombuffer( + memoryview(c[-(sz * 8) :]), dtype=np.float64, count=sz, offset=0 + ) features = features.reshape(datadef.tensor.shape) else: # Python 2 version which is slower - features = np.array(datadef.tensor.values).reshape( - datadef.tensor.shape) + features = np.array(datadef.tensor.values).reshape(datadef.tensor.shape) elif data_type == "ndarray": py_arr = json_format.MessageToDict(datadef.ndarray) features = np.array(py_arr) @@ -206,7 +216,9 @@ def get_meta_from_proto(request: prediction_pb2.SeldonMessage) -> Dict: return meta -def array_to_rest_datadef(data_type: str, array: np.ndarray, names: Optional[List[str]] = []) -> Dict: +def array_to_rest_datadef( + data_type: str, array: np.ndarray, names: Optional[List[str]] = [] +) -> Dict: """ Construct a payload Dict from a numpy array @@ -223,10 +235,7 @@ def array_to_rest_datadef(data_type: str, array: np.ndarray, names: Optional[Lis """ datadef: Dict = {"names": names} if data_type == "tensor": - datadef["tensor"] = { - "shape": array.shape, - "values": array.ravel().tolist() - } + datadef["tensor"] = {"shape": array.shape, "values": array.ravel().tolist()} elif data_type == "ndarray": datadef["ndarray"] = array.tolist() elif data_type == "tftensor": @@ -239,8 +248,9 @@ def array_to_rest_datadef(data_type: str, array: np.ndarray, names: Optional[Lis return datadef -def array_to_grpc_datadef(data_type: str, array: np.ndarray, - names: Optional[Iterable[str]] = []) -> prediction_pb2.DefaultData: +def array_to_grpc_datadef( + data_type: str, array: np.ndarray, names: Optional[Iterable[str]] = [] +) -> prediction_pb2.DefaultData: """ Convert numpy array and optional column names into a SeldonMessage DefaultData proto @@ -262,24 +272,20 @@ def array_to_grpc_datadef(data_type: str, array: np.ndarray, datadef = prediction_pb2.DefaultData( names=names, tensor=prediction_pb2.Tensor( - shape=array.shape, - values=array.ravel().tolist() - ) + shape=array.shape, values=array.ravel().tolist() + ), ) elif data_type == "ndarray": datadef = prediction_pb2.DefaultData( - names=names, - ndarray=array_to_list_value(array) + names=names, ndarray=array_to_list_value(array) ) elif data_type == "tftensor": datadef = prediction_pb2.DefaultData( - names=names, - tftensor=tf.make_tensor_proto(array) + names=names, tftensor=tf.make_tensor_proto(array) ) else: datadef = prediction_pb2.DefaultData( - names=names, - ndarray=array_to_list_value(array) + names=names, ndarray=array_to_list_value(array) ) return datadef @@ -310,11 +316,13 @@ def array_to_list_value(array: np.ndarray, lv: Optional[ListValue] = None) -> Li array_to_list_value(sub_array, sub_lv) return lv + def construct_response_json( - user_model: SeldonComponent, - is_request: bool, - client_request_raw: Union[List, Dict], - client_raw_response: Union[np.ndarray, str, bytes, dict]) -> Union[List, Dict]: + user_model: SeldonComponent, + is_request: bool, + client_request_raw: Union[List, Dict], + client_raw_response: Union[np.ndarray, str, bytes, dict], +) -> Union[List, Dict]: """ This class converts a raw REST response into a JSON object that has the same structure as the SeldonMessage 
proto. This is necessary as the conversion using the SeldonMessage proto @@ -351,7 +359,8 @@ def construct_response_json( if not (is_np or is_list): raise SeldonMicroserviceException( "Unknown data type returned as payload (must be list or np array):" - + str(client_raw_response)) + + str(client_raw_response) + ) if is_np: np_client_raw_response = client_raw_response list_client_raw_response = client_raw_response.tolist() @@ -366,12 +375,13 @@ def construct_response_json( default_data_type = "tensor" result_client_response = { "values": np_client_raw_response.ravel().tolist(), - "shape": np_client_raw_response.shape + "shape": np_client_raw_response.shape, } elif "tftensor" in client_request_raw["data"]: default_data_type = "tftensor" tf_json_str = json_format.MessageToJson( - tf.make_tensor_proto(np_client_raw_response)) + tf.make_tensor_proto(np_client_raw_response) + ) result_client_response = json.loads(tf_json_str) else: default_data_type = "ndarray" @@ -384,7 +394,7 @@ def construct_response_json( default_data_type = "tensor" result_client_response = { "values": np_client_raw_response.ravel().tolist(), - "shape": np_client_raw_response.shape + "shape": np_client_raw_response.shape, } else: default_data_type = "ndarray" @@ -414,8 +424,12 @@ def construct_response_json( return response -def construct_response(user_model: SeldonComponent, is_request: bool, client_request: prediction_pb2.SeldonMessage, - client_raw_response: Union[np.ndarray, str, bytes, dict]) -> prediction_pb2.SeldonMessage: +def construct_response( + user_model: SeldonComponent, + is_request: bool, + client_request: prediction_pb2.SeldonMessage, + client_raw_response: Union[np.ndarray, str, bytes, dict], +) -> prediction_pb2.SeldonMessage: """ Parameters @@ -447,13 +461,17 @@ def construct_response(user_model: SeldonComponent, is_request: bool, client_req if client_request.meta.puid: meta_json["puid"] = client_request.meta.puid json_format.ParseDict(meta_json, meta) - if isinstance(client_raw_response, np.ndarray) or isinstance(client_raw_response, list): + if isinstance(client_raw_response, np.ndarray) or isinstance( + client_raw_response, list + ): client_raw_response = np.array(client_raw_response) if is_request: names = client_feature_names(user_model, client_request.data.names) else: names = client_class_names(user_model, client_raw_response) - if data_type == "data": # If request is using defaultdata then return what was sent if is numeric response else ndarray + if ( + data_type == "data" + ): # If request is using defaultdata then return what was sent if is numeric response else ndarray if np.issubdtype(client_raw_response.dtype, np.number): default_data_type = client_request.data.WhichOneof("data_oneof") else: @@ -468,20 +486,26 @@ def construct_response(user_model: SeldonComponent, is_request: bool, client_req elif isinstance(client_raw_response, str): return prediction_pb2.SeldonMessage(strData=client_raw_response, meta=meta) elif isinstance(client_raw_response, dict): - jsonDataResponse = ParseDict(client_raw_response, prediction_pb2.SeldonMessage().jsonData) + jsonDataResponse = ParseDict( + client_raw_response, prediction_pb2.SeldonMessage().jsonData + ) return prediction_pb2.SeldonMessage(jsonData=jsonDataResponse, meta=meta) elif isinstance(client_raw_response, (bytes, bytearray)): return prediction_pb2.SeldonMessage(binData=client_raw_response, meta=meta) else: - raise SeldonMicroserviceException("Unknown data type returned as payload:" + client_raw_response) + raise SeldonMicroserviceException( + 
"Unknown data type returned as payload:" + client_raw_response + ) -def extract_request_parts_json(request: Union[Dict, List] - ) -> Tuple[ - Union[np.ndarray, str, bytes, Dict, List], - Union[Dict, None], - Union[np.ndarray, str, bytes, Dict, List, None], - str]: +def extract_request_parts_json( + request: Union[Dict, List] +) -> Tuple[ + Union[np.ndarray, str, bytes, Dict, List], + Union[Dict, None], + Union[np.ndarray, str, bytes, Dict, List, None], + str, +]: """ Parameters @@ -531,8 +555,10 @@ def extract_request_parts_json(request: Union[Dict, List] return features, meta, datadef, data_type -def extract_request_parts(request: prediction_pb2.SeldonMessage) -> Tuple[ - Union[np.ndarray, str, bytes, dict], Dict, prediction_pb2.DefaultData, str]: + +def extract_request_parts( + request: prediction_pb2.SeldonMessage +) -> Tuple[Union[np.ndarray, str, bytes, dict], Dict, prediction_pb2.DefaultData, str]: """ Parameters @@ -552,8 +578,9 @@ def extract_request_parts(request: prediction_pb2.SeldonMessage) -> Tuple[ return features, meta, datadef, data_type -def extract_feedback_request_parts(request: prediction_pb2.Feedback) -> Tuple[ - prediction_pb2.DefaultData, np.ndarray, np.ndarray, float]: +def extract_feedback_request_parts( + request: prediction_pb2.Feedback +) -> Tuple[prediction_pb2.DefaultData, np.ndarray, np.ndarray, float]: """ Extract key parts of the Feedback Message diff --git a/python/seldon_core/version.py b/python/seldon_core/version.py index f0ede3d372..3d26edf777 100644 --- a/python/seldon_core/version.py +++ b/python/seldon_core/version.py @@ -1 +1 @@ -__version__ = '0.4.1' +__version__ = "0.4.1" diff --git a/python/seldon_core/wrapper.py b/python/seldon_core/wrapper.py index 28c141804b..ff13a21af3 100644 --- a/python/seldon_core/wrapper.py +++ b/python/seldon_core/wrapper.py @@ -3,10 +3,18 @@ from flask import jsonify, Flask, send_from_directory, request from flask_cors import CORS import logging -from seldon_core.utils import json_to_seldon_message, seldon_message_to_json, json_to_feedback, json_to_seldon_messages +from seldon_core.utils import ( + json_to_seldon_message, + seldon_message_to_json, + json_to_feedback, + json_to_seldon_messages, +) from seldon_core.flask_utils import get_request import seldon_core.seldon_methods -from seldon_core.flask_utils import SeldonMicroserviceException, ANNOTATION_GRPC_MAX_MSG_SIZE +from seldon_core.flask_utils import ( + SeldonMicroserviceException, + ANNOTATION_GRPC_MAX_MSG_SIZE, +) from seldon_core.proto import prediction_pb2_grpc import os @@ -16,11 +24,11 @@ def get_rest_microservice(user_model): - app = Flask(__name__, static_url_path='') + app = Flask(__name__, static_url_path="") CORS(app) - if hasattr(user_model, 'model_error_handler'): - logger.info('Registering the custom error handler...') + if hasattr(user_model, "model_error_handler"): + logger.info("Registering the custom error handler...") app.register_blueprint(user_model.model_error_handler) @app.errorhandler(SeldonMicroserviceException) @@ -32,7 +40,7 @@ def handle_invalid_usage(error): @app.route("/seldon.json", methods=["GET"]) def openAPI(): - return send_from_directory('', "openapi/seldon.json") + return send_from_directory("", "openapi/seldon.json") @app.route("/predict", methods=["GET", "POST"]) @app.route("/api/v0.1/predictions", methods=["POST"]) @@ -50,7 +58,9 @@ def SendFeedback(): logger.debug("REST Request: %s", request) requestProto = json_to_feedback(requestJson) logger.debug("Proto Request: %s", requestProto) - responseProto = 
seldon_core.seldon_methods.send_feedback(user_model, requestProto, PRED_UNIT_ID) + responseProto = seldon_core.seldon_methods.send_feedback( + user_model, requestProto, PRED_UNIT_ID + ) jsonDict = seldon_message_to_json(responseProto) return jsonify(jsonDict) @@ -58,8 +68,7 @@ def SendFeedback(): def TransformInput(): requestJson = get_request() logger.debug("REST Request: %s", request) - response = seldon_core.seldon_methods.transform_input( - user_model, requestJson) + response = seldon_core.seldon_methods.transform_input(user_model, requestJson) logger.debug("REST Response: %s", response) return jsonify(response) @@ -67,8 +76,7 @@ def TransformInput(): def TransformOutput(): requestJson = get_request() logger.debug("REST Request: %s", request) - response = seldon_core.seldon_methods.transform_output( - user_model, requestJson) + response = seldon_core.seldon_methods.transform_output(user_model, requestJson) logger.debug("REST Response: %s", response) return jsonify(response) @@ -76,8 +84,7 @@ def TransformOutput(): def Route(): requestJson = get_request() logger.debug("REST Request: %s", request) - response = seldon_core.seldon_methods.route( - user_model, requestJson) + response = seldon_core.seldon_methods.route(user_model, requestJson) logger.debug("REST Response: %s", response) return jsonify(response) @@ -85,8 +92,7 @@ def Route(): def Aggregate(): requestJson = get_request() logger.debug("REST Request: %s", request) - response = seldon_core.seldon_methods.aggregate( - user_model, requestJson) + response = seldon_core.seldon_methods.aggregate(user_model, requestJson) logger.debug("REST Response: %s", response) return jsonify(response) @@ -97,6 +103,7 @@ def Aggregate(): # GRPC # ---------------------------- + class SeldonModelGRPC(object): def __init__(self, user_model): self.user_model = user_model @@ -105,13 +112,17 @@ def Predict(self, request_grpc, context): return seldon_core.seldon_methods.predict(self.user_model, request_grpc) def SendFeedback(self, feedback_grpc, context): - return seldon_core.seldon_methods.send_feedback(self.user_model, feedback_grpc, PRED_UNIT_ID) + return seldon_core.seldon_methods.send_feedback( + self.user_model, feedback_grpc, PRED_UNIT_ID + ) def TransformInput(self, request_grpc, context): return seldon_core.seldon_methods.transform_input(self.user_model, request_grpc) def TransformOutput(self, request_grpc, context): - return seldon_core.seldon_methods.transform_output(self.user_model, request_grpc) + return seldon_core.seldon_methods.transform_output( + self.user_model, request_grpc + ) def Route(self, request_grpc, context): return seldon_core.seldon_methods.route(self.user_model, request_grpc) @@ -125,17 +136,16 @@ def get_grpc_server(user_model, annotations={}, trace_interceptor=None): options = [] if ANNOTATION_GRPC_MAX_MSG_SIZE in annotations: max_msg = int(annotations[ANNOTATION_GRPC_MAX_MSG_SIZE]) - logger.info( - "Setting grpc max message and receive length to %d", max_msg) - options.append(('grpc.max_message_length', max_msg)) - options.append(('grpc.max_send_message_length', max_msg)) - options.append(('grpc.max_receive_message_length', max_msg)) + logger.info("Setting grpc max message and receive length to %d", max_msg) + options.append(("grpc.max_message_length", max_msg)) + options.append(("grpc.max_send_message_length", max_msg)) + options.append(("grpc.max_receive_message_length", max_msg)) - server = grpc.server(futures.ThreadPoolExecutor( - max_workers=10), options=options) + server = 
grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options) if trace_interceptor: from grpc_opentracing.grpcext import intercept_server + server = intercept_server(server, trace_interceptor) prediction_pb2_grpc.add_GenericServicer_to_server(seldon_model, server) diff --git a/python/setup.py b/python/setup.py index bb80926a61..9b089f656f 100644 --- a/python/setup.py +++ b/python/setup.py @@ -6,54 +6,48 @@ with open(os.path.join(dir_path, "seldon_core/version.py")) as fp: exec(fp.read(), version) -setup(name='seldon-core', - author='Seldon Technologies Ltd.', - author_email='hello@seldon.io', - version=version['__version__'], - description='Seldon Core client and microservice wrapper', - url='https://github.com/SeldonIO/seldon-core', - license='Apache 2.0', - packages=find_packages(), - include_package_data=True, - setup_requires=[ - 'pytest-runner' - ], - python_requires='>=3.6', - install_requires=[ - 'flask', - 'flask-cors', - 'redis', - 'requests', - 'numpy', - 'flatbuffers', - 'protobuf', - 'grpcio', - 'Flask-OpenTracing==0.2.0', - 'opentracing>=1.2.2,<2', - 'jaeger-client==3.13.0', - 'grpcio-opentracing', - 'pyyaml', - 'gunicorn>=19.9.0', - "minio >= 4.0.9", - "google-cloud-storage >= 1.16.0", - "azure-storage-blob >= 2.0.1", - "setuptools >= 41.0.0" - ], - tests_require=[ - 'pytest', - 'pytest-cov', - 'Pillow' - ], - extras_require={ - 'tensorflow': ['tensorflow'] - }, - test_suite='tests', - entry_points={ - 'console_scripts': [ - 'seldon-core-microservice = seldon_core.microservice:main', - 'seldon-core-tester = seldon_core.microservice_tester:main', - 'seldon-core-microservice-tester = seldon_core.microservice_tester:main', - 'seldon-core-api-tester = seldon_core.api_tester:main', - ], - }, - zip_safe=False) +setup( + name="seldon-core", + author="Seldon Technologies Ltd.", + author_email="hello@seldon.io", + version=version["__version__"], + description="Seldon Core client and microservice wrapper", + url="https://github.com/SeldonIO/seldon-core", + license="Apache 2.0", + packages=find_packages(), + include_package_data=True, + setup_requires=["pytest-runner"], + python_requires=">=3.6", + install_requires=[ + "flask", + "flask-cors", + "redis", + "requests", + "numpy", + "flatbuffers", + "protobuf", + "grpcio", + "Flask-OpenTracing==0.2.0", + "opentracing>=1.2.2,<2", + "jaeger-client==3.13.0", + "grpcio-opentracing", + "pyyaml", + "gunicorn>=19.9.0", + "minio >= 4.0.9", + "google-cloud-storage >= 1.16.0", + "azure-storage-blob >= 2.0.1", + "setuptools >= 41.0.0", + ], + tests_require=["pytest", "pytest-cov", "Pillow"], + extras_require={"tensorflow": ["tensorflow"]}, + test_suite="tests", + entry_points={ + "console_scripts": [ + "seldon-core-microservice = seldon_core.microservice:main", + "seldon-core-tester = seldon_core.microservice_tester:main", + "seldon-core-microservice-tester = seldon_core.microservice_tester:main", + "seldon-core-api-tester = seldon_core.api_tester:main", + ] + }, + zip_safe=False, +) diff --git a/python/tests/model-template-app/MyModel.py b/python/tests/model-template-app/MyModel.py index 91963bb6a3..4610f5cc75 100644 --- a/python/tests/model-template-app/MyModel.py +++ b/python/tests/model-template-app/MyModel.py @@ -1,6 +1,8 @@ import logging + logger = logging.getLogger(__name__) + class MyModel(object): """ Model template. 
You can load your model parameters in __init__ from a location accessible at runtime diff --git a/python/tests/model-template-app2/mymodule/my_model.py b/python/tests/model-template-app2/mymodule/my_model.py index 91963bb6a3..4610f5cc75 100644 --- a/python/tests/model-template-app2/mymodule/my_model.py +++ b/python/tests/model-template-app2/mymodule/my_model.py @@ -1,6 +1,8 @@ import logging + logger = logging.getLogger(__name__) + class MyModel(object): """ Model template. You can load your model parameters in __init__ from a location accessible at runtime diff --git a/python/tests/test_api_tester.py b/python/tests/test_api_tester.py index 11502daef1..6c3fb87ee0 100644 --- a/python/tests/test_api_tester.py +++ b/python/tests/test_api_tester.py @@ -30,25 +30,52 @@ def __init__(self, adict): self.__dict__.update(adict) -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest(mock_post): filename = join(dirname(__file__), "model-template-app", "contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "predict", "prnt": True, - "grpc": False, "tensor": True, "oauth_key": None, "oauth_secret": None, "deployment": "abc", - "namespace": None} + args_dict = { + "contract": filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "predict", + "prnt": True, + "grpc": False, + "tensor": True, + "oauth_key": None, + "oauth_secret": None, + "deployment": "abc", + "namespace": None, + } args = Bunch(args_dict) run_predict(args) print(mock_post.call_args) - assert mock_post.call_args[1]["json"]["data"]["names"] == ["sepal_length", "sepal_width", "petal_length", "petal_width"] + assert mock_post.call_args[1]["json"]["data"]["names"] == [ + "sepal_length", + "sepal_width", + "petal_length", + "petal_width", + ] -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_feedback_rest(mock_post): filename = join(dirname(__file__), "model-template-app", "contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "predict", "prnt": True, - "grpc": False, "tensor": True, "oauth_key": None, "oauth_secret": None, "deployment": "abc", - "namespace": None} + args_dict = { + "contract": filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "predict", + "prnt": True, + "grpc": False, + "tensor": True, + "oauth_key": None, + "oauth_secret": None, + "deployment": "abc", + "namespace": None, + } args = Bunch(args_dict) run_send_feedback(args) diff --git a/python/tests/test_application_exception_microservice.py b/python/tests/test_application_exception_microservice.py index 695f8649d0..39fcb9ff8f 100644 --- a/python/tests/test_application_exception_microservice.py +++ b/python/tests/test_application_exception_microservice.py @@ -10,10 +10,11 @@ import flask from flask import jsonify + class UserCustomException(Exception): status_code = 404 - def __init__(self, message, application_error_code,http_status_code): + def __init__(self, message, application_error_code, http_status_code): Exception.__init__(self) self.message = message if http_status_code is not None: @@ -21,13 +22,19 @@ def __init__(self, message, application_error_code,http_status_code): self.application_error_code = application_error_code def 
to_dict(self): - rv = {"status": {"status": self.status_code, "message": self.message, - "app_code": self.application_error_code}} + rv = { + "status": { + "status": self.status_code, + "message": self.message, + "app_code": self.application_error_code, + } + } return rv + class UserObject(SeldonComponent): - model_error_handler = flask.Blueprint('error_handlers', __name__) + model_error_handler = flask.Blueprint("error_handlers", __name__) @model_error_handler.app_errorhandler(UserCustomException) def handleCustomError(error): @@ -35,7 +42,6 @@ def handleCustomError(error): response.status_code = error.status_code return response - def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): self.metrics_ok = metrics_ok self.ret_nparray = ret_nparray @@ -43,13 +49,13 @@ def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): self.ret_meta = ret_meta def predict(self, X, features_names, **kwargs): - raise UserCustomException('Test-Error-Msg',1402,402) + raise UserCustomException("Test-Error-Msg", 1402, 402) return X class UserObjectLowLevel(SeldonComponent): - model_error_handler = flask.Blueprint('error_handlers', __name__) + model_error_handler = flask.Blueprint("error_handlers", __name__) @model_error_handler.app_errorhandler(UserCustomException) def handleCustomError(error): @@ -57,14 +63,13 @@ def handleCustomError(error): response.status_code = error.status_code return response - def __init__(self, metrics_ok=True, ret_nparray=False): self.metrics_ok = metrics_ok self.ret_nparray = ret_nparray self.nparray = np.array([1, 2, 3]) def predict_rest(self, request): - raise UserCustomException('Test-Error-Msg',1402,402) + raise UserCustomException("Test-Error-Msg", 1402, 402) return {"data": {"ndarray": [9, 9]}} @@ -88,4 +93,3 @@ def test_raise_eception_lowlevel(): print(j) assert rv.status_code == 402 assert j["status"]["app_code"] == 1402 - diff --git a/python/tests/test_combiner_microservice.py b/python/tests/test_combiner_microservice.py index 0bd09d54c4..a6efea4a74 100644 --- a/python/tests/test_combiner_microservice.py +++ b/python/tests/test_combiner_microservice.py @@ -46,18 +46,14 @@ def aggregate_rest(self, Xs): return {"data": {"ndarray": [9, 9]}} def aggregate_grpc( - self, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) if is_proto: @@ -75,10 +71,7 @@ def __init__(self, metrics_ok=True, ret_nparray=False): def aggregate_grpc(self, X): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -152,7 +145,9 @@ def test_aggreate_ok_2messages(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}' + ) print(rv) j = 
json.loads(rv.data) print(j) @@ -168,10 +163,15 @@ def test_aggreate_ok_bindata(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") rv = client.get( - '/aggregate?json={"seldonMessages":[{"binData":"' + bdata_base64 + '"},{"binData":"' + bdata_base64 + '"}]}') - bdata_base64_result = base64.b64encode(base64.b64encode(bdata)).decode('utf-8') + '/aggregate?json={"seldonMessages":[{"binData":"' + + bdata_base64 + + '"},{"binData":"' + + bdata_base64 + + '"}]}' + ) + bdata_base64_result = base64.b64encode(base64.b64encode(bdata)).decode("utf-8") print(rv) j = json.loads(rv.data) print(j) @@ -186,7 +186,9 @@ def test_aggreate_ok_strdata(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"strData":"123"},{"strData":"456"}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"strData":"123"},{"strData":"456"}]}' + ) print(rv) j = json.loads(rv.data) print(j) @@ -201,7 +203,9 @@ def test_aggregate_bad_metrics(): user_object = UserObject(metrics_ok=False) app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 400 @@ -211,7 +215,9 @@ def test_aggreate_ok_lowlevel(): user_object = UserObjectLowLevel() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}' + ) print(rv) j = json.loads(rv.data) print(j) @@ -224,17 +230,11 @@ def test_aggregate_proto_ok(): app = SeldonModelGRPC(user_object) arr1 = np.array([1, 2]) datadef1 = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr1 - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr1) ) arr2 = np.array([3, 4]) datadef2 = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr2 - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr2) ) msg1 = prediction_pb2.SeldonMessage(data=datadef1) msg2 = prediction_pb2.SeldonMessage(data=datadef2) @@ -266,17 +266,11 @@ def test_aggregate_proto_lowlevel_ok(): app = SeldonModelGRPC(user_object) arr1 = np.array([1, 2]) datadef1 = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr1 - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr1) ) arr2 = np.array([3, 4]) datadef2 = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr2 - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr2) ) msg1 = prediction_pb2.SeldonMessage(data=datadef1) msg2 = prediction_pb2.SeldonMessage(data=datadef2) @@ -302,7 +296,9 @@ def aggregate(self, Xs, features_names): user_object = CustomSeldonComponent() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}' + ) j = json.loads(rv.data) 
print(j) @@ -318,7 +314,9 @@ def aggregate(self, Xs, features_names): user_object = CustomObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}') + rv = client.get( + '/aggregate?json={"seldonMessages":[{"data":{"ndarray":[1]}},{"data":{"ndarray":[2]}}]}' + ) j = json.loads(rv.data) print(j) diff --git a/python/tests/test_metrics.py b/python/tests/test_metrics.py index d72d7412d4..f5ecdc8494 100644 --- a/python/tests/test_metrics.py +++ b/python/tests/test_metrics.py @@ -39,8 +39,7 @@ def test_create_gauge_invalid_value(): def test_validate_ok(): - assert validate_metrics( - [{"type": COUNTER, "key": "a", "value": 1}]) == True + assert validate_metrics([{"type": COUNTER, "key": "a", "value": 1}]) == True def test_validate_bad_type(): @@ -60,8 +59,7 @@ def test_validate_no_value(): def test_validate_bad_value(): - assert validate_metrics( - [{"type": COUNTER, "key": "a", "value": "1"}]) == False + assert validate_metrics([{"type": COUNTER, "key": "a", "value": "1"}]) == False def test_validate_no_list(): @@ -69,7 +67,6 @@ def test_validate_no_list(): class Component(object): - def __init__(self, ok=True): self.ok = ok @@ -100,8 +97,14 @@ def test_proto_metrics(): def test_proto_tags(): - metric = {"tags": {"t1": "t2"}, "metrics": [{"type": "COUNTER", "key": "mycounter", "value": 1.2}, { - "type": "GAUGE", "key": "mygauge", "value": 1.2}, {"type": "TIMER", "key": "mytimer", "value": 1.2}]} + metric = { + "tags": {"t1": "t2"}, + "metrics": [ + {"type": "COUNTER", "key": "mycounter", "value": 1.2}, + {"type": "GAUGE", "key": "mygauge", "value": 1.2}, + {"type": "TIMER", "key": "mytimer", "value": 1.2}, + ], + } meta = prediction_pb2.Meta() json_format.ParseDict(metric, meta) jStr = json_format.MessageToJson(meta) diff --git a/python/tests/test_microservice.py b/python/tests/test_microservice.py index e0fca775e2..93acd28f0d 100644 --- a/python/tests/test_microservice.py +++ b/python/tests/test_microservice.py @@ -15,20 +15,23 @@ import signal import unittest.mock as mock + @contextmanager -def start_microservice(app_location,tracing=False,grpc=False,envs={}): +def start_microservice(app_location, tracing=False, grpc=False, envs={}): p = None try: # PYTHONUNBUFFERED=x # exec python -u microservice.py $MODEL_NAME $API_TYPE --service-type $SERVICE_TYPE --persistence $PERSISTENCE env_vars = dict(os.environ) env_vars.update(envs) - env_vars.update({ - "PYTHONUNBUFFERED": "x", - "PYTHONPATH": app_location, - "APP_HOST": "127.0.0.1", - "SERVICE_PORT_ENV_NAME": "5000", - }) + env_vars.update( + { + "PYTHONUNBUFFERED": "x", + "PYTHONPATH": app_location, + "APP_HOST": "127.0.0.1", + "SERVICE_PORT_ENV_NAME": "5000", + } + ) with open(join(app_location, ".s2i", "environment")) as fh: for line in fh.readlines(): line = line.strip() @@ -43,8 +46,10 @@ def start_microservice(app_location,tracing=False,grpc=False,envs={}): "seldon-core-microservice", env_vars["MODEL_NAME"], env_vars["API_TYPE"], - "--service-type", env_vars["SERVICE_TYPE"], - "--persistence", env_vars["PERSISTENCE"] + "--service-type", + env_vars["SERVICE_TYPE"], + "--persistence", + env_vars["PERSISTENCE"], ) if tracing: cmd = cmd + ("--tracing",) @@ -67,61 +72,70 @@ def start_microservice(app_location,tracing=False,grpc=False,envs={}): os.killpg(os.getpgid(p.pid), signal.SIGTERM) -@pytest.mark.parametrize( - 'tracing', [(False), (True)] - ) +@pytest.mark.parametrize("tracing", [(False), (True)]) def 
test_model_template_app_rest(tracing): - with start_microservice(join(dirname(__file__), "model-template-app"),tracing=tracing): + with start_microservice( + join(dirname(__file__), "model-template-app"), tracing=tracing + ): data = '{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}}' response = requests.get( - "http://127.0.0.1:5000/predict", params="json=%s" % data) + "http://127.0.0.1:5000/predict", params="json=%s" % data + ) response.raise_for_status() assert response.json() == { - 'data': {'names': ['t:0', 't:1'], 'ndarray': [[1.0, 2.0]]}, 'meta': {}} - - data = ('{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' - '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' - '"ndarray":[[1.0,2.0]]}},"reward":1}') + "data": {"names": ["t:0", "t:1"], "ndarray": [[1.0, 2.0]]}, + "meta": {}, + } + + data = ( + '{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' + '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' + '"ndarray":[[1.0,2.0]]}},"reward":1}' + ) response = requests.get( - "http://127.0.0.1:5000/send-feedback", params="json=%s" % data) + "http://127.0.0.1:5000/send-feedback", params="json=%s" % data + ) response.raise_for_status() - assert response.json() == {'data': {'ndarray': []}, 'meta': {}} + assert response.json() == {"data": {"ndarray": []}, "meta": {}} -@pytest.mark.parametrize( - 'tracing', [(False), (True)] - ) +@pytest.mark.parametrize("tracing", [(False), (True)]) def test_model_template_app_rest_submodule(tracing): - with start_microservice(join(dirname(__file__), "model-template-app2"),tracing=tracing): + with start_microservice( + join(dirname(__file__), "model-template-app2"), tracing=tracing + ): data = '{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}}' response = requests.get( - "http://127.0.0.1:5000/predict", params="json=%s" % data) + "http://127.0.0.1:5000/predict", params="json=%s" % data + ) response.raise_for_status() assert response.json() == { - 'data': {'names': ['t:0', 't:1'], 'ndarray': [[1.0, 2.0]]}, 'meta': {}} - - data = ('{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' - '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' - '"ndarray":[[1.0,2.0]]}},"reward":1}') + "data": {"names": ["t:0", "t:1"], "ndarray": [[1.0, 2.0]]}, + "meta": {}, + } + + data = ( + '{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' + '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' + '"ndarray":[[1.0,2.0]]}},"reward":1}' + ) response = requests.get( - "http://127.0.0.1:5000/send-feedback", params="json=%s" % data) + "http://127.0.0.1:5000/send-feedback", params="json=%s" % data + ) response.raise_for_status() - assert response.json() == {'data': {'ndarray': []}, 'meta': {}} + assert response.json() == {"data": {"ndarray": []}, "meta": {}} -@pytest.mark.parametrize( - 'tracing', [(False), (True)] - ) +@pytest.mark.parametrize("tracing", [(False), (True)]) def test_model_template_app_grpc(tracing): - with start_microservice(join(dirname(__file__), "model-template-app"),tracing=tracing,grpc=True): - data = np.array([[1,2]]) + with start_microservice( + join(dirname(__file__), "model-template-app"), tracing=tracing, grpc=True + ): + data = np.array([[1, 2]]) datadef = prediction_pb2.DefaultData( - tensor = prediction_pb2.Tensor( - shape = data.shape, - values = data.flatten() - ) + tensor=prediction_pb2.Tensor(shape=data.shape, values=data.flatten()) ) - request = prediction_pb2.SeldonMessage(data = datadef) + request = 
prediction_pb2.SeldonMessage(data=datadef) channel = grpc.insecure_channel("0.0.0.0:5000") stub = prediction_pb2_grpc.ModelStub(channel) response = stub.Predict(request=request) @@ -132,62 +146,82 @@ def test_model_template_app_grpc(tracing): arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) - feedback = prediction_pb2.Feedback(request=request,reward=1.0) + feedback = prediction_pb2.Feedback(request=request, reward=1.0) response = stub.SendFeedback(request=request) def test_model_template_app_tracing_config(): - envs = {"JAEGER_CONFIG_PATH":join(dirname(__file__), "tracing_config/tracing.yaml")} - with start_microservice(join(dirname(__file__), "model-template-app"),tracing=True,envs=envs): + envs = { + "JAEGER_CONFIG_PATH": join(dirname(__file__), "tracing_config/tracing.yaml") + } + with start_microservice( + join(dirname(__file__), "model-template-app"), tracing=True, envs=envs + ): data = '{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}}' response = requests.get( - "http://127.0.0.1:5000/predict", params="json=%s" % data) + "http://127.0.0.1:5000/predict", params="json=%s" % data + ) response.raise_for_status() assert response.json() == { - 'data': {'names': ['t:0', 't:1'], 'ndarray': [[1.0, 2.0]]}, 'meta': {}} - - data = ('{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' - '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' - '"ndarray":[[1.0,2.0]]}},"reward":1}') + "data": {"names": ["t:0", "t:1"], "ndarray": [[1.0, 2.0]]}, + "meta": {}, + } + + data = ( + '{"request":{"data":{"names":["a","b"],"ndarray":[[1.0,2.0]]}},' + '"response":{"meta":{"routing":{"router":0}},"data":{"names":["a","b"],' + '"ndarray":[[1.0,2.0]]}},"reward":1}' + ) response = requests.get( - "http://127.0.0.1:5000/send-feedback", params="json=%s" % data) + "http://127.0.0.1:5000/send-feedback", params="json=%s" % data + ) response.raise_for_status() - assert response.json() == {'data': {'ndarray': []}, 'meta': {}} + assert response.json() == {"data": {"ndarray": []}, "meta": {}} def test_model_template_bad_params(): - params = [join(dirname(__file__), "model-template-app"),"seldon-core-microservice","REST","--parameters",'[{"type":"FLOAT","name":"foo","value":"abc"}]'] - with mock.patch('sys.argv',params): + params = [ + join(dirname(__file__), "model-template-app"), + "seldon-core-microservice", + "REST", + "--parameters", + '[{"type":"FLOAT","name":"foo","value":"abc"}]', + ] + with mock.patch("sys.argv", params): with pytest.raises(SeldonMicroserviceException): microservice.main() def test_model_template_bad_params_type(): - params = [join(dirname(__file__), "model-template-app"),"seldon-core-microservice","REST","--parameters",'[{"type":"FOO","name":"foo","value":"abc"}]'] - with mock.patch('sys.argv',params): + params = [ + join(dirname(__file__), "model-template-app"), + "seldon-core-microservice", + "REST", + "--parameters", + '[{"type":"FOO","name":"foo","value":"abc"}]', + ] + with mock.patch("sys.argv", params): with pytest.raises(SeldonMicroserviceException): microservice.main() -@mock.patch('seldon_core.microservice.os.path.isfile', return_value=True) +@mock.patch("seldon_core.microservice.os.path.isfile", return_value=True) def test_load_annotations(mock_isfile): from io import StringIO + read_data = [ - ('', {}), - ('\n\n', {}), - ('foo=bar', {'foo': 'bar'}), - ('foo=bar\nx 
=y', {'foo': 'bar', 'x': 'y'}), - ('foo=bar\nfoo=baz\n', {'foo': 'baz'}), - (' foo = bar ', {'foo': 'bar'}), - ('key = assign===', {'key': 'assign==='}), - ('foo=\nfoo', {'foo': ''}), + ("", {}), + ("\n\n", {}), + ("foo=bar", {"foo": "bar"}), + ("foo=bar\nx =y", {"foo": "bar", "x": "y"}), + ("foo=bar\nfoo=baz\n", {"foo": "baz"}), + (" foo = bar ", {"foo": "bar"}), + ("key = assign===", {"key": "assign==="}), + ("foo=\nfoo", {"foo": ""}), ] for data, expected_annotation in read_data: - with mock.patch('seldon_core.microservice.open', return_value=StringIO(data)): + with mock.patch("seldon_core.microservice.open", return_value=StringIO(data)): assert microservice.load_annotations() == expected_annotation diff --git a/python/tests/test_microservice_tester.py b/python/tests/test_microservice_tester.py index efa262540a..9dea3ec4b6 100644 --- a/python/tests/test_microservice_tester.py +++ b/python/tests/test_microservice_tester.py @@ -1,5 +1,9 @@ -from seldon_core.microservice_tester import run_method, run_send_feedback, reconciliate_cont_type, \ - SeldonTesterException +from seldon_core.microservice_tester import ( + run_method, + run_send_feedback, + reconciliate_cont_type, + SeldonTesterException, +) from unittest import mock from seldon_core.utils import array_to_grpc_datadef, seldon_message_to_json from seldon_core.proto import prediction_pb2 @@ -33,34 +37,66 @@ def __init__(self, adict): self.__dict__.update(adict) -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest(mock_post): filename = join(dirname(__file__), "model-template-app", "contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "predict", "prnt": True, "grpc": False, "tensor": True} + args_dict = { + "contract": filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "predict", + "prnt": True, + "grpc": False, + "tensor": True, + } args = Bunch(args_dict) - run_method(args,"predict") + run_method(args, "predict") print(mock_post.call_args[1]) payload = json.loads(mock_post.call_args[1]["data"]["json"]) - assert payload["data"]["names"] == ["sepal_length", "sepal_width", "petal_length","petal_width"] + assert payload["data"]["names"] == [ + "sepal_length", + "sepal_width", + "petal_length", + "petal_width", + ] -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_feedback_rest(mock_post): filename = join(dirname(__file__), "model-template-app", "contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "feedback", "prnt": True, "grpc": False, "tensor": True} + args_dict = { + "contract": filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "feedback", + "prnt": True, + "grpc": False, + "tensor": True, + } args = Bunch(args_dict) run_send_feedback(args) -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest_categorical(mock_post): filename = join(dirname(__file__), "resources", "contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "predict", "prnt": True, "grpc": False, "tensor": False} + args_dict = { + "contract": 
filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "predict", + "prnt": True, + "grpc": False, + "tensor": False, + } args = Bunch(args_dict) - run_method(args,"predict") + run_method(args, "predict") def test_reconciliate_exception(): @@ -72,7 +108,16 @@ def test_reconciliate_exception(): def test_bad_contract(): with pytest.raises(SeldonTesterException): filename = join(dirname(__file__), "resources", "bad_contract.json") - args_dict = {"contract": filename, "host": "a", "port": 1000, "n_requests": 1, "batch_size": 1, - "endpoint": "feedback", "prnt": True, "grpc": False, "tensor": True} + args_dict = { + "contract": filename, + "host": "a", + "port": 1000, + "n_requests": 1, + "batch_size": 1, + "endpoint": "feedback", + "prnt": True, + "grpc": False, + "tensor": True, + } args = Bunch(args_dict) run_send_feedback(args) diff --git a/python/tests/test_model_microservice.py b/python/tests/test_model_microservice.py index d9a6d72e94..47a2f1e6b5 100644 --- a/python/tests/test_model_microservice.py +++ b/python/tests/test_model_microservice.py @@ -33,8 +33,11 @@ ------- the checksum """ + + def rs232_checksum(the_bytes): - return b'%02X' % (sum(the_bytes) & 0xFF) + return b"%02X" % (sum(the_bytes) & 0xFF) + class UserObject(SeldonComponent): def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): @@ -89,10 +92,7 @@ def predict_rest(self, request): def predict_grpc(self, request): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -106,21 +106,31 @@ def send_feedback_grpc(self, request): class UserObjectLowLevelWithPredictRaw(SeldonComponent): def __init__(self, check_name): - self.check_name=check_name + self.check_name = check_name + def predict_raw(self, msg): - msg=json_to_seldon_message(msg) - if self.check_name == 'img': - file_data=msg.binData - img = Image.open(io.BytesIO (file_data)) + msg = json_to_seldon_message(msg) + if self.check_name == "img": + file_data = msg.binData + img = Image.open(io.BytesIO(file_data)) img.verify() - return {"meta": seldon_message_to_json(msg.meta),"data": {"ndarray": [rs232_checksum(file_data).decode('utf-8')]}} - elif self.check_name == 'txt': - file_data=msg.binData - return {"meta": seldon_message_to_json(msg.meta),"data": {"ndarray": [file_data.decode('utf-8')]}} - elif self.check_name == 'strData': - file_data=msg.strData - return {"meta": seldon_message_to_json(msg.meta), "data": {"ndarray": [file_data]}} - + return { + "meta": seldon_message_to_json(msg.meta), + "data": {"ndarray": [rs232_checksum(file_data).decode("utf-8")]}, + } + elif self.check_name == "txt": + file_data = msg.binData + return { + "meta": seldon_message_to_json(msg.meta), + "data": {"ndarray": [file_data.decode("utf-8")]}, + } + elif self.check_name == "strData": + file_data = msg.strData + return { + "meta": seldon_message_to_json(msg.meta), + "data": {"ndarray": [file_data]}, + } + class UserObjectLowLevelGrpc(SeldonComponent): def __init__(self, metrics_ok=True, ret_nparray=False): @@ -131,10 +141,7 @@ def __init__(self, metrics_ok=True, ret_nparray=False): def predict_grpc(self, request): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = 
prediction_pb2.SeldonMessage(data=datadef) return request @@ -160,11 +167,14 @@ def test_model_ok(): assert j["data"]["names"] == ["t:0", "t:1"] assert j["data"]["ndarray"] == [[1.0, 2.0]] + def test_model_puid_ok(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/predict?json={"meta":{"puid":"123"},"data":{"names":["a","b"],"ndarray":[[1,2]]}}') + rv = client.get( + '/predict?json={"meta":{"puid":"123"},"data":{"names":["a","b"],"ndarray":[[1,2]]}}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -173,7 +183,8 @@ def test_model_puid_ok(): assert j["meta"]["metrics"][0]["value"] == user_object.metrics()[0]["value"] assert j["data"]["names"] == ["t:0", "t:1"] assert j["data"]["ndarray"] == [[1.0, 2.0]] - assert j["meta"]["puid"] == '123' + assert j["meta"]["puid"] == "123" + def test_model_lowlevel_ok(): user_object = UserObjectLowLevel() @@ -185,43 +196,78 @@ def test_model_lowlevel_ok(): assert rv.status_code == 200 assert j["data"]["ndarray"] == [9, 9] + def test_model_lowlevel_multi_form_data_text_file_ok(): - user_object = UserObjectLowLevelWithPredictRaw('txt') + user_object = UserObjectLowLevelWithPredictRaw("txt") app = get_rest_microservice(user_object) client = app.test_client() - rv = client.post('/predict',data={"meta":'{"puid":"1234"}',"binData":(f'./tests/resources/test.txt','test.txt')},content_type='multipart/form-data') + rv = client.post( + "/predict", + data={ + "meta": '{"puid":"1234"}', + "binData": (f"./tests/resources/test.txt", "test.txt"), + }, + content_type="multipart/form-data", + ) j = json.loads(rv.data) assert rv.status_code == 200 assert j["meta"]["puid"] == "1234" - assert j["data"]["ndarray"][0] == "this is test file for testing multipart/form-data input\n" + assert ( + j["data"]["ndarray"][0] + == "this is test file for testing multipart/form-data input\n" + ) + def test_model_lowlevel_multi_form_data_img_file_ok(): - user_object = UserObjectLowLevelWithPredictRaw('img') + user_object = UserObjectLowLevelWithPredictRaw("img") app = get_rest_microservice(user_object) client = app.test_client() - rv = client.post('/predict',data={"meta":'{"puid":"1234"}',"binData":(f'./tests/resources/test.png','test.png')},content_type='multipart/form-data') + rv = client.post( + "/predict", + data={ + "meta": '{"puid":"1234"}', + "binData": (f"./tests/resources/test.png", "test.png"), + }, + content_type="multipart/form-data", + ) j = json.loads(rv.data) assert rv.status_code == 200 assert j["meta"]["puid"] == "1234" - with open('./tests/resources/test.png',"rb") as f: - img_data=f.read() - assert j["data"]["ndarray"][0] == rs232_checksum(img_data).decode('utf-8') + with open("./tests/resources/test.png", "rb") as f: + img_data = f.read() + assert j["data"]["ndarray"][0] == rs232_checksum(img_data).decode("utf-8") + def test_model_lowlevel_multi_form_data_strData_ok(): - user_object = UserObjectLowLevelWithPredictRaw('strData') + user_object = UserObjectLowLevelWithPredictRaw("strData") app = get_rest_microservice(user_object) client = app.test_client() - rv = client.post('/predict',data={"meta":'{"puid":"1234"}',"strData":(f'./tests/resources/test.txt','test.txt')},content_type='multipart/form-data') + rv = client.post( + "/predict", + data={ + "meta": '{"puid":"1234"}', + "strData": (f"./tests/resources/test.txt", "test.txt"), + }, + content_type="multipart/form-data", + ) j = json.loads(rv.data) assert rv.status_code == 200 assert j["meta"]["puid"] == "1234" - assert 
j["data"]["ndarray"][0] == "this is test file for testing multipart/form-data input\n" + assert ( + j["data"]["ndarray"][0] + == "this is test file for testing multipart/form-data input\n" + ) + def test_model_multi_form_data_ok(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.post('/predict',data={"data":'{"names":["a","b"],"ndarray":[[1,2]]}'},content_type='multipart/form-data') + rv = client.post( + "/predict", + data={"data": '{"names":["a","b"],"ndarray":[[1,2]]}'}, + content_type="multipart/form-data", + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -231,11 +277,14 @@ def test_model_multi_form_data_ok(): assert j["data"]["names"] == ["t:0", "t:1"] assert j["data"]["ndarray"] == [[1.0, 2.0]] + def test_model_feedback_ok(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}') + rv = client.get( + '/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -245,7 +294,9 @@ def test_model_feedback_lowlevel_ok(): user_object = UserObjectLowLevel() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}') + rv = client.get( + '/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -257,22 +308,19 @@ def test_model_tftensor_ok(): app = get_rest_microservice(user_object) client = app.test_client() arr = np.array([1, 2]) - datadef = prediction_pb2.DefaultData( - tftensor=tf.make_tensor_proto(arr) - ) + datadef = prediction_pb2.DefaultData(tftensor=tf.make_tensor_proto(arr)) request = prediction_pb2.SeldonMessage(data=datadef) jStr = json_format.MessageToJson(request) - rv = client.get('/predict?json=' + jStr) + rv = client.get("/predict?json=" + jStr) j = json.loads(rv.data) print(j) assert rv.status_code == 200 assert j["meta"]["tags"] == {"mytag": 1} assert j["meta"]["metrics"][0]["key"] == user_object.metrics()[0]["key"] assert j["meta"]["metrics"][0]["value"] == user_object.metrics()[0]["value"] - assert 'tftensor' in j['data'] + assert "tftensor" in j["data"] tfp = TensorProto() - json_format.ParseDict(j['data'].get("tftensor"), - tfp, ignore_unknown_fields=False) + json_format.ParseDict(j["data"].get("tftensor"), tfp, ignore_unknown_fields=False) arr2 = tf.make_ndarray(tfp) assert np.array_equal(arr, arr2) @@ -281,8 +329,7 @@ def test_model_ok_with_names(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get( - '/predict?json={"data":{"names":["a","b"],"ndarray":[[1,2]]}}') + rv = client.get('/predict?json={"data":{"names":["a","b"],"ndarray":[[1,2]]}}') j = json.loads(rv.data) assert rv.status_code == 200 assert j["meta"]["tags"] == {"mytag": 1} @@ -295,11 +342,10 @@ def test_model_bin_data(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") rv = client.get('/predict?json={"binData":"' + bdata_base64 + '"}') j = json.loads(rv.data) - return_data = \ - base64.b64encode(base64.b64encode(bdata)).decode('utf-8') + return_data = base64.b64encode(base64.b64encode(bdata)).decode("utf-8") assert rv.status_code == 200 
assert j["binData"] == return_data assert j["meta"]["tags"] == {"mytag": 1} @@ -355,7 +401,7 @@ def test_model_no_json(): app = get_rest_microservice(user_object) client = app.test_client() uo = UserObject() - rv = client.get('/predict?') + rv = client.get("/predict?") j = json.loads(rv.data) print(j) assert rv.status_code == 400 @@ -372,7 +418,7 @@ def test_model_bad_metrics(): def test_model_error_status_code(): - class ErrorUserObject(): + class ErrorUserObject: def predict(self, X, features_names, **kwargs): raise SeldonMicroserviceException("foo", status_code=403) @@ -398,6 +444,7 @@ def test_model_gets_meta(): assert j["meta"]["metrics"][0]["key"] == user_object.metrics()[0]["key"] assert j["meta"]["metrics"][0]["value"] == user_object.metrics()[0]["value"] + def test_model_seldon_json_ok(): user_object = UserObject() app = get_rest_microservice(user_object) @@ -405,15 +452,13 @@ def test_model_seldon_json_ok(): rv = client.get("/seldon.json") assert rv.status_code == 200 + def test_proto_ok(): user_object = UserObject() app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Predict(request, None) @@ -432,10 +477,7 @@ def test_proto_lowlevel(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Predict(request, None) @@ -451,10 +493,7 @@ def test_proto_feedback(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) feedback = prediction_pb2.Feedback(request=request, reward=1.0) @@ -466,10 +505,7 @@ def test_proto_feedback_custom(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) feedback = prediction_pb2.Feedback(request=request, reward=1.0) @@ -481,9 +517,7 @@ def test_proto_tftensor_ok(): user_object = UserObject() app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) - datadef = prediction_pb2.DefaultData( - tftensor=tf.make_tensor_proto(arr) - ) + datadef = prediction_pb2.DefaultData(tftensor=tf.make_tensor_proto(arr)) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Predict(request, None) jStr = json_format.MessageToJson(resp) @@ -528,10 +562,7 @@ def test_proto_gets_meta(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) meta = prediction_pb2.Meta() metaJson = {"puid": "abc"} @@ -588,7 +619,9 @@ def feedback(self, features, feature_names, reward, truth): user_object = CustomSeldonComponent() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}') + rv = client.get( + 
'/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) @@ -603,7 +636,9 @@ def feedback(self, features, feature_names, reward, truth): user_object = CustomObject() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}') + rv = client.get( + '/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) diff --git a/python/tests/test_router_microservice.py b/python/tests/test_router_microservice.py index a8ebae21c3..be4afa0bc6 100644 --- a/python/tests/test_router_microservice.py +++ b/python/tests/test_router_microservice.py @@ -40,10 +40,7 @@ def route_rest(self, request): def route_grpc(self, request): arr = np.array([1]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(1, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(1, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -64,10 +61,7 @@ def __init__(self, metrics_ok=True, ret_nparray=False): def route_grpc(self, request): arr = np.array([1]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(1, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(1, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -83,18 +77,14 @@ def __init__(self, metrics_ok=True, ret_nparray=False): self.nparray = np.array([1, 2, 3]) def route_raw( - self, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([1]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(1, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(1, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) if is_proto: @@ -162,7 +152,7 @@ def test_router_no_json(): app = get_rest_microservice(user_object) client = app.test_client() uo = UserObject() - rv = client.get('/route?') + rv = client.get("/route?") j = json.loads(rv.data) print(j) assert rv.status_code == 400 @@ -183,7 +173,8 @@ def test_router_feedback_ok(): app = get_rest_microservice(user_object) client = app.test_client() rv = client.get( - '/send-feedback?json={"request":{"data":{"ndarray":[]}},"response":{"meta":{"routing":{"1":1}}},"reward":1.0}') + '/send-feedback?json={"request":{"data":{"ndarray":[]}},"response":{"meta":{"routing":{"1":1}}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -193,7 +184,9 @@ def test_router_feedback_lowlevel_ok(): user_object = UserObjectLowLevel() app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}') + rv = client.get( + '/send-feedback?json={"request":{"data":{"ndarray":[]}},"reward":1.0}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -204,10 +197,7 @@ def test_router_proto_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Route(request, None) @@ 
-227,10 +217,7 @@ def test_router_proto_lowlevel_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Route(request, None) @@ -246,10 +233,7 @@ def test_router_proto_lowlevel_raw_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.Route(request, None) @@ -265,10 +249,7 @@ def test_proto_feedback(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) meta = prediction_pb2.Meta() metaJson = {} diff --git a/python/tests/test_seldon_client.py b/python/tests/test_seldon_client.py index 6be765723a..2e0e4eb229 100644 --- a/python/tests/test_seldon_client.py +++ b/python/tests/test_seldon_client.py @@ -1,4 +1,8 @@ -from seldon_core.seldon_client import SeldonClient, SeldonClientPrediction, SeldonClientCombine +from seldon_core.seldon_client import ( + SeldonClient, + SeldonClientPrediction, + SeldonClientCombine, +) from unittest import mock from seldon_core.utils import array_to_grpc_datadef, seldon_message_to_json from seldon_core.proto import prediction_pb2, prediction_pb2_grpc @@ -28,7 +32,7 @@ def mocked_requests_post_success(url, *args, **kwargs): return MockResponse(json, 200, text="{}") -@mock.patch('requests.post', side_effect=mocked_requests_post_404) +@mock.patch("requests.post", side_effect=mocked_requests_post_404) def test_predict_rest_404(mock_post): sc = SeldonClient(deployment_name="404") response = sc.predict() @@ -36,7 +40,7 @@ def test_predict_rest_404(mock_post): assert response.msg == "404:Not Found" -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest(mock_post): sc = SeldonClient(deployment_name="mymodel") response = sc.predict() @@ -46,26 +50,29 @@ def test_predict_rest(mock_post): assert mock_post.call_count == 1 -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest_with_names(mock_post): sc = SeldonClient(deployment_name="mymodel") - response = sc.predict(names=["a","b"]) + response = sc.predict(names=["a", "b"]) assert mock_post.call_args[1]["json"]["data"]["names"] == ["a", "b"] assert response.success == True assert response.response.data.tensor.shape == [1, 1] assert mock_post.call_count == 1 -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_rest_with_ambassador_prefix(mock_post): sc = SeldonClient(deployment_name="mymodel") - response = sc.predict(gateway="ambassador",transport="rest",gateway_prefix="/mycompany/ml") + response = sc.predict( + gateway="ambassador", transport="rest", gateway_prefix="/mycompany/ml" + ) assert mock_post.call_args[0][0].index("/mycompany/ml") > 0 assert response.success == True assert response.response.data.tensor.shape == [1, 1] assert mock_post.call_count == 1 
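# Illustrative sketch, not part of the original patch: the hunks in this file,
# like the rest of the diff, match Black's default output (88-character lines,
# double-quoted strings, exploded argument lists with trailing commas).
# Assuming the repository layout shown in the patch header, the same
# formatting can be reproduced or verified with:
#
#     pip install black
#     black python/ testing/           # rewrite files in place
#     black --check python/ testing/   # exit non-zero if reformatting is needed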
-@mock.patch('requests.post', side_effect=mocked_requests_post_success) + +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_predict_microservice_rest(mock_post): sc = SeldonClient(deployment_name="mymodel") response = sc.microservice(method="predict") @@ -75,11 +82,14 @@ def test_predict_microservice_rest(mock_post): assert mock_post.call_count == 1 -@mock.patch('requests.post', side_effect=mocked_requests_post_success) +@mock.patch("requests.post", side_effect=mocked_requests_post_success) def test_feedback_microservice_rest(mock_post): sc = SeldonClient(deployment_name="mymodel") - response = sc.microservice_feedback(prediction_request=prediction_pb2.SeldonMessage(), - prediction_response=prediction_pb2.SeldonMessage(), reward=1.0) + response = sc.microservice_feedback( + prediction_request=prediction_pb2.SeldonMessage(), + prediction_response=prediction_pb2.SeldonMessage(), + reward=1.0, + ) print(response) assert response.success == True assert response.response.data.tensor.shape == [1, 1] @@ -87,7 +97,6 @@ def test_feedback_microservice_rest(mock_post): class MyStub(object): - def __init__(self, channel): self.channel = channel @@ -108,20 +117,24 @@ def mock_grpc_stub_predict(channel): return MyStub() -def mock_get_token(oauth_key: str = "", oauth_secret: str = "", namespace: str = None, - endpoint: str = "localhost:8002"): +def mock_get_token( + oauth_key: str = "", + oauth_secret: str = "", + namespace: str = None, + endpoint: str = "localhost:8002", +): return "1234" -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.SeldonStub', new=MyStub) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.SeldonStub", new=MyStub) def test_predict_grpc_ambassador(): sc = SeldonClient(deployment_name="mymodel", transport="grpc", gateway="ambassador") response = sc.predict() assert response.response.strData == "predict" -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.SeldonStub', new=MyStub) -@mock.patch('seldon_core.seldon_client.get_token', side_effect=mock_get_token) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.SeldonStub", new=MyStub) +@mock.patch("seldon_core.seldon_client.get_token", side_effect=mock_get_token) def test_predict_grpc_seldon(mock_get_token): sc = SeldonClient(deployment_name="mymodel", transport="grpc", gateway="seldon") response = sc.predict() @@ -129,28 +142,28 @@ def test_predict_grpc_seldon(mock_get_token): assert mock_get_token.call_count == 1 -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.ModelStub', new=MyStub) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.ModelStub", new=MyStub) def test_predict_grpc_microservice_predict(): sc = SeldonClient(transport="grpc") response = sc.microservice(method="predict") assert response.response.strData == "predict" -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.GenericStub', new=MyStub) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.GenericStub", new=MyStub) def test_predict_grpc_microservice_transform_input(): sc = SeldonClient(transport="grpc") response = sc.microservice(method="transform-input") assert response.response.strData == "transform-input" -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.GenericStub', new=MyStub) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.GenericStub", new=MyStub) def test_predict_grpc_microservice_transform_output(): sc = SeldonClient(transport="grpc") response = sc.microservice(method="transform-output") assert response.response.strData == 
"transform-output" -@mock.patch('seldon_core.seldon_client.prediction_pb2_grpc.GenericStub', new=MyStub) +@mock.patch("seldon_core.seldon_client.prediction_pb2_grpc.GenericStub", new=MyStub) def test_predict_grpc_microservice_transform_route(): sc = SeldonClient(transport="grpc") response = sc.microservice(method="route") @@ -161,74 +174,111 @@ def test_predict_grpc_microservice_transform_route(): # Wiring Tests # -@mock.patch('seldon_core.seldon_client.microservice_api_rest_seldon_message', - return_value=SeldonClientPrediction(None, None)) + +@mock.patch( + "seldon_core.seldon_client.microservice_api_rest_seldon_message", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_microservice_api_rest_seldon_message(mock_handler): sc = SeldonClient() response = sc.microservice(transport="rest", method="predict") assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.microservice_api_rest_aggregate', return_value=SeldonClientCombine(None, None)) +@mock.patch( + "seldon_core.seldon_client.microservice_api_rest_aggregate", + return_value=SeldonClientCombine(None, None), +) def test_wiring_microservice_api_rest_aggregate(mock_handler): sc = SeldonClient() response = sc.microservice(transport="rest", method="aggregate") assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.microservice_api_rest_feedback', return_value=SeldonClientCombine(None, None)) +@mock.patch( + "seldon_core.seldon_client.microservice_api_rest_feedback", + return_value=SeldonClientCombine(None, None), +) def test_wiring_microservice_api_rest_feedback(mock_handler): sc = SeldonClient() - response = sc.microservice_feedback(prediction_pb2.SeldonMessage(), prediction_pb2.SeldonMessage(), 1.0, - transport="rest") + response = sc.microservice_feedback( + prediction_pb2.SeldonMessage(), + prediction_pb2.SeldonMessage(), + 1.0, + transport="rest", + ) assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.microservice_api_grpc_seldon_message', - return_value=SeldonClientPrediction(None, None)) +@mock.patch( + "seldon_core.seldon_client.microservice_api_grpc_seldon_message", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_microservice_api_grpc_seldon_message(mock_handler): sc = SeldonClient() response = sc.microservice(transport="grpc", method="predict") assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.microservice_api_grpc_aggregate', return_value=SeldonClientCombine(None, None)) +@mock.patch( + "seldon_core.seldon_client.microservice_api_grpc_aggregate", + return_value=SeldonClientCombine(None, None), +) def test_wiring_microservice_api_grpc_aggregate(mock_handler): sc = SeldonClient() response = sc.microservice(transport="grpc", method="aggregate") assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.microservice_api_grpc_feedback', return_value=SeldonClientCombine(None, None)) +@mock.patch( + "seldon_core.seldon_client.microservice_api_grpc_feedback", + return_value=SeldonClientCombine(None, None), +) def test_wiring_microservice_api_grpc_feedback(mock_handler): sc = SeldonClient() - response = sc.microservice_feedback(prediction_pb2.SeldonMessage(), prediction_pb2.SeldonMessage(), 1.0, - transport="grpc") + response = sc.microservice_feedback( + prediction_pb2.SeldonMessage(), + prediction_pb2.SeldonMessage(), + 1.0, + transport="grpc", + ) assert mock_handler.call_count == 1 -@mock.patch('seldon_core.seldon_client.rest_predict_gateway', return_value=SeldonClientPrediction(None, 
None)) +@mock.patch( + "seldon_core.seldon_client.rest_predict_gateway", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_rest_predict_ambassador(mock_rest_predict_ambassador): sc = SeldonClient(deployment_name="mymodel") response = sc.predict(gateway="ambassador", transport="rest") assert mock_rest_predict_ambassador.call_count == 1 -@mock.patch('seldon_core.seldon_client.grpc_predict_gateway', return_value=SeldonClientPrediction(None, None)) +@mock.patch( + "seldon_core.seldon_client.grpc_predict_gateway", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_grpc_predict_ambassador(mock_grpc_predict_ambassador): sc = SeldonClient(deployment_name="mymodel") response = sc.predict(gateway="ambassador", transport="grpc") assert mock_grpc_predict_ambassador.call_count == 1 -@mock.patch('seldon_core.seldon_client.rest_predict_seldon_oauth', return_value=SeldonClientPrediction(None, None)) +@mock.patch( + "seldon_core.seldon_client.rest_predict_seldon_oauth", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_rest_predict_seldon_oauth(mock_rest_predict_seldon_oauth): sc = SeldonClient(deployment_name="mymodel") response = sc.predict(gateway="seldon", transport="rest") assert mock_rest_predict_seldon_oauth.call_count == 1 -@mock.patch('seldon_core.seldon_client.grpc_predict_seldon_oauth', return_value=SeldonClientPrediction(None, None)) +@mock.patch( + "seldon_core.seldon_client.grpc_predict_seldon_oauth", + return_value=SeldonClientPrediction(None, None), +) def test_wiring_grpc_predict_seldon_oauth(mock_grpc_predict_seldon_oauth): sc = SeldonClient(deployment_name="mymodel") response = sc.predict(gateway="seldon", transport="grpc") diff --git a/python/tests/test_transformer_microservice.py b/python/tests/test_transformer_microservice.py index 74d578cb33..1b354d3df3 100644 --- a/python/tests/test_transformer_microservice.py +++ b/python/tests/test_transformer_microservice.py @@ -67,10 +67,7 @@ def transform_output_rest(self, X): def transform_input_grpc(self, X): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -78,10 +75,7 @@ def transform_input_grpc(self, X): def transform_output_grpc(self, X): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -96,10 +90,7 @@ def __init__(self, metrics_ok=True, ret_nparray=False): def transform_input_grpc(self, X): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -107,10 +98,7 @@ def transform_input_grpc(self, X): def transform_output_grpc(self, X): arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) return request @@ -123,18 +111,14 @@ def __init__(self, metrics_ok=True, ret_nparray=False): self.nparray = np.array([1, 2, 3]) def transform_input_raw( - self, - request: Union[prediction_pb2.SeldonMessage, List, 
Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) if is_proto: @@ -143,18 +127,14 @@ def transform_input_raw( return seldon_message_to_json(response) def transform_output_raw( - self, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) @@ -171,18 +151,14 @@ def __init__(self, metrics_ok=True, ret_nparray=False): self.nparray = np.array([1, 2, 3]) def transform_input_raw( - self, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) if is_proto: @@ -191,18 +167,14 @@ def transform_input_raw( return seldon_message_to_json(response) def transform_output_raw( - self, - request: Union[prediction_pb2.SeldonMessage, List, Dict]) \ - -> Union[prediction_pb2.SeldonMessage, List, Dict]: + self, request: Union[prediction_pb2.SeldonMessage, List, Dict] + ) -> Union[prediction_pb2.SeldonMessage, List, Dict]: is_proto = isinstance(request, prediction_pb2.SeldonMessage) arr = np.array([9, 9]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) response = prediction_pb2.SeldonMessage(data=datadef) if is_proto: @@ -264,7 +236,7 @@ def test_transformer_input_bin_data(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") rv = client.get('/transform-input?json={"binData":"' + bdata_base64 + '"}') j = json.loads(rv.data) sm = prediction_pb2.SeldonMessage() @@ -283,7 +255,7 @@ def test_transformer_input_bin_data_nparray(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") rv = client.get('/transform-input?json={"binData":"' + bdata_base64 + '"}') j = json.loads(rv.data) print(j) @@ -299,7 +271,7 @@ def test_tranform_input_no_json(): app = get_rest_microservice(user_object) client = app.test_client() uo = UserObject() - rv = client.get('/transform-input?') + rv = client.get("/transform-input?") j = json.loads(rv.data) print(j) assert 
rv.status_code == 400 @@ -319,7 +291,9 @@ def test_transform_input_gets_meta(): user_object = UserObject(ret_meta=True) app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/transform-input?json={"meta":{"puid": "abc"},"data":{"ndarray":[]}}') + rv = client.get( + '/transform-input?json={"meta":{"puid": "abc"},"data":{"ndarray":[]}}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -332,7 +306,9 @@ def test_transform_output_gets_meta(): user_object = UserObject(ret_meta=True) app = get_rest_microservice(user_object) client = app.test_client() - rv = client.get('/transform-output?json={"meta":{"puid": "abc"},"data":{"ndarray":[]}}') + rv = client.get( + '/transform-output?json={"meta":{"puid": "abc"},"data":{"ndarray":[]}}' + ) j = json.loads(rv.data) print(j) assert rv.status_code == 200 @@ -393,9 +369,8 @@ def test_transformer_output_bin_data(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') - rv = client.get( - '/transform-output?json={"binData":"' + bdata_base64 + '"}') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") + rv = client.get('/transform-output?json={"binData":"' + bdata_base64 + '"}') j = json.loads(rv.data) sm = prediction_pb2.SeldonMessage() # Check we can parse response @@ -413,7 +388,7 @@ def test_transformer_output_bin_data_nparray(): app = get_rest_microservice(user_object) client = app.test_client() bdata = b"123" - bdata_base64 = base64.b64encode(bdata).decode('utf-8') + bdata_base64 = base64.b64encode(bdata).decode("utf-8") rv = client.get('/transform-output?json={"binData":"' + bdata_base64 + '"}') j = json.loads(rv.data) print(j) @@ -429,7 +404,7 @@ def test_tranform_output_no_json(): app = get_rest_microservice(user_object) client = app.test_client() uo = UserObject() - rv = client.get('/transform-output?') + rv = client.get("/transform-output?") j = json.loads(rv.data) print(j) assert rv.status_code == 400 @@ -450,10 +425,7 @@ def test_transform_input_proto_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.TransformInput(request, None) @@ -473,10 +445,7 @@ def test_transform_input_proto_lowlevel_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.TransformInput(request, None) @@ -513,10 +482,7 @@ def test_transform_output_proto_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.TransformOutput(request, None) @@ -536,10 +502,7 @@ def test_transform_output_proto_lowlevel_ok(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) resp = app.TransformOutput(request, None) @@ -581,10 +544,7 @@ def 
test_transform_input_proto_gets_meta(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) meta = prediction_pb2.Meta() metaJson = {"puid": "abc"} @@ -607,10 +567,7 @@ def test_transform_output_proto_gets_meta(): app = SeldonModelGRPC(user_object) arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) meta = prediction_pb2.Meta() metaJson = {"puid": "abc"} diff --git a/python/tests/test_user_model.py b/python/tests/test_user_model.py index efeb4c0a27..9b64942a31 100644 --- a/python/tests/test_user_model.py +++ b/python/tests/test_user_model.py @@ -2,22 +2,28 @@ from seldon_core.user_model import SeldonComponent, client_class_names import logging + class UserObjectClassAttr(SeldonComponent): def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): - self.class_names = ["a","b"] + self.class_names = ["a", "b"] class UserObjectClassMethod(SeldonComponent): def class_names(self): - return ["x","y"] + return ["x", "y"] + def test_class_names_attr(caplog): caplog.set_level(logging.INFO) user_object = UserObjectClassAttr() - predictions = np.array([[1,2],[3,4]]) - names = client_class_names(user_object,predictions) - assert names == ["a","b"] - assert "class_names attribute is deprecated. Please define a class_names method" in caplog.text + predictions = np.array([[1, 2], [3, 4]]) + names = client_class_names(user_object, predictions) + assert names == ["a", "b"] + assert ( + "class_names attribute is deprecated. Please define a class_names method" + in caplog.text + ) + def test_class_names_method(caplog): caplog.set_level(logging.INFO) @@ -25,7 +31,11 @@ def test_class_names_method(caplog): predictions = np.array([[1, 2], [3, 4]]) names = client_class_names(user_object, predictions) assert names == ["x", "y"] - assert not "class_names attribute is deprecated. Please define a class_names method" in caplog.text + assert ( + not "class_names attribute is deprecated. 
Please define a class_names method" + in caplog.text + ) + def test_no_class_names_on_seldon_component(caplog): caplog.set_level(logging.INFO) @@ -34,12 +44,13 @@ def test_no_class_names_on_seldon_component(caplog): names = client_class_names(user_object, predictions) assert names == ["t:0", "t:1"] + def test_no_class_names(caplog): caplog.set_level(logging.INFO) - class X(): + + class X: pass + user_object = X() predictions = np.array([[1, 2], [3, 4]]) names = client_class_names(user_object, predictions) - - diff --git a/python/tests/test_utils.py b/python/tests/test_utils.py index 2f6bb3f6e4..5e7053a0f5 100644 --- a/python/tests/test_utils.py +++ b/python/tests/test_utils.py @@ -13,6 +13,7 @@ if not _TF_MISSING: import tensorflow as tf + class UserObject(object): def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): self.metrics_ok = metrics_ok @@ -53,18 +54,16 @@ def metrics(self): else: return [{"type": "BAD", "key": "mycounter", "value": 1}] + def test_create_rest_reponse_nparray(): user_model = UserObject() request = {} raw_response = np.array([[1, 2, 3]]) - result = scu.construct_response_json( - user_model, - True, - request, - raw_response) + result = scu.construct_response_json(user_model, True, request, raw_response) assert "tensor" in result.get("data", {}) assert result["data"]["tensor"]["values"] == [1, 2, 3] + def test_create_grpc_reponse_nparray(): user_model = UserObject() request = prediction_pb2.SeldonMessage() @@ -73,28 +72,21 @@ def test_create_grpc_reponse_nparray(): assert sm.data.WhichOneof("data_oneof") == "tensor" assert sm.data.tensor.values == [1, 2, 3] + def test_create_rest_reponse_text_ndarray(): user_model = UserObject() request_data = np.array([["hello", "world"], ["hello", "another", "world"]]) - request = { - "data": { - "ndarray": request_data, - "names": [] - } - } + request = {"data": {"ndarray": request_data, "names": []}} (features, meta, datadef, data_type) = scu.extract_request_parts_json(request) raw_response = np.array([["hello", "world"], ["here", "another"]]) - result = scu.construct_response_json( - user_model, - True, - request, - raw_response) + result = scu.construct_response_json(user_model, True, request, raw_response) assert "ndarray" in result.get("data", {}) assert np.array_equal(result["data"]["ndarray"], raw_response) assert datadef == request["data"] assert np.array_equal(features, request_data) assert data_type == "data" + def test_create_grpc_reponse_text_ndarray(): user_model = UserObject() request_data = np.array([["hello", "world"], ["hello", "another", "world"]]) @@ -110,23 +102,16 @@ def test_create_grpc_reponse_text_ndarray(): assert np.array_equal(features, request_data) assert data_type == "data" + def test_create_rest_reponse_ndarray(): user_model = UserObject() - request = { - "data": { - "ndarray": np.array([[5, 6, 7]]), - "names": [] - } - } + request = {"data": {"ndarray": np.array([[5, 6, 7]]), "names": []}} raw_response = np.array([[1, 2, 3]]) - result = scu.construct_response_json( - user_model, - True, - request, - raw_response) + result = scu.construct_response_json(user_model, True, request, raw_response) assert "ndarray" in result.get("data", {}) assert np.array_equal(result["data"]["ndarray"], raw_response) + def test_create_grpc_reponse_ndarray(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) @@ -136,27 +121,16 @@ def test_create_grpc_reponse_ndarray(): sm = scu.construct_response(user_model, True, request, raw_response) assert sm.data.WhichOneof("data_oneof") == "ndarray" + 
def test_create_rest_reponse_tensor(): user_model = UserObject() - tensor = { - "values": [1,2,3], - "shape": (3,) - } - request = { - "data": { - "tensor": tensor, - "names": [] - } - } + tensor = {"values": [1, 2, 3], "shape": (3,)} + request = {"data": {"tensor": tensor, "names": []}} raw_response = np.array([1, 2, 3]) - result = scu.construct_response_json( - user_model, - True, - request, - raw_response) + result = scu.construct_response_json(user_model, True, request, raw_response) assert "tensor" in result.get("data", {}) - assert np.array_equal( - result["data"]["tensor"], tensor) + assert np.array_equal(result["data"]["tensor"], tensor) + def test_create_grpc_reponse_tensor(): user_model = UserObject() @@ -167,22 +141,18 @@ def test_create_grpc_reponse_tensor(): sm = scu.construct_response(user_model, True, request, raw_response) assert sm.data.WhichOneof("data_oneof") == "tensor" + def test_create_rest_response_strdata(): user_model = UserObject() request_data = "Request data" - request = { - "strData": request_data - } + request = {"strData": request_data} raw_response = "hello world" - sm = scu.construct_response_json( - user_model, - True, - request, - raw_response) + sm = scu.construct_response_json(user_model, True, request, raw_response) assert "strData" in sm assert len(sm["strData"]) > 0 assert sm["strData"] == raw_response + def test_create_grpc_response_strdata(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) @@ -205,26 +175,31 @@ def test_create_grpc_response_jsondata(): emptyValue = Value() assert sm.jsonData != emptyValue + def test_create_rest_response_jsondata(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) datadef = scu.array_to_rest_datadef("ndarray", request_data) - json_request = { "jsonData": datadef } + json_request = {"jsonData": datadef} raw_response = {"output": "data"} - json_response = scu.construct_response_json(user_model, True, json_request, raw_response) + json_response = scu.construct_response_json( + user_model, True, json_request, raw_response + ) assert "data" not in json_response emptyValue = Value() assert json_response["jsonData"] != emptyValue + def test_symmetric_json_conversion(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) datadef = scu.array_to_rest_datadef("ndarray", request_data) - json_request = { "jsonData": datadef } + json_request = {"jsonData": datadef} seldon_message_request = scu.json_to_seldon_message(json_request) result_json_request = scu.seldon_message_to_json(seldon_message_request) assert json_request == result_json_request + def test_create_grpc_reponse_list(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) @@ -234,12 +209,11 @@ def test_create_grpc_reponse_list(): sm = scu.construct_response(user_model, True, request, raw_response) assert sm.data.WhichOneof("data_oneof") == "ndarray" + def test_create_rest_reponse_binary(): user_model = UserObject() request_data = b"input" - request = { - "binData": request_data - } + request = {"binData": request_data} raw_resp = b"binary" sm = scu.construct_response_json(user_model, True, request, raw_resp) resp_data = base64.b64encode(raw_resp).decode("utf-8") @@ -247,6 +221,7 @@ def test_create_rest_reponse_binary(): assert "binData" in sm assert sm["binData"] == resp_data + def test_create_grpc_reponse_binary(): user_model = UserObject() request_data = np.array([[5, 6, 7]]) @@ -258,6 +233,7 @@ def test_create_grpc_reponse_binary(): assert len(sm.strData) == 0 assert len(sm.binData) > 0 + def 
test_json_to_seldon_message_normal_data(): data = {"data": {"tensor": {"shape": [1, 1], "values": [1]}}} requestProto = scu.json_to_seldon_message(data) @@ -271,6 +247,7 @@ def test_json_to_seldon_message_normal_data(): assert arr.shape[1] == 1 assert arr[0][0] == 1 + def test_json_to_seldon_message_ndarray(): data = {"data": {"ndarray": [[1]]}} requestProto = scu.json_to_seldon_message(data) @@ -281,10 +258,11 @@ def test_json_to_seldon_message_ndarray(): assert arr.shape[1] == 1 assert arr[0][0] == 1 + def test_json_to_seldon_message_bin_data(): a = np.array([1, 2, 3]) serialized = pickle.dumps(a) - bdata_base64 = base64.b64encode(serialized).decode('utf-8') + bdata_base64 = base64.b64encode(serialized).decode("utf-8") data = {"binData": bdata_base64} requestProto = scu.json_to_seldon_message(data) assert len(requestProto.data.tensor.values) == 0 @@ -319,8 +297,11 @@ def test_json_to_seldon_message_bad_data(): def test_json_to_feedback(): - data = {"request": {"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, - "response": {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}, "reward": 1.0} + data = { + "request": {"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, + "response": {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}, + "reward": 1.0, + } requestProto = scu.json_to_feedback(data) assert requestProto.request.data.tensor.values == [1.0] assert requestProto.response.data.tensor.values == [2.0] @@ -328,14 +309,21 @@ def test_json_to_feedback(): def test_json_to_feedback_bad_data(): with pytest.raises(SeldonMicroserviceException): - data = {"requestBAD": {"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, - "response": {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}, "reward": 1.0} + data = { + "requestBAD": {"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, + "response": {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}, + "reward": 1.0, + } requestProto = scu.json_to_feedback(data) def test_json_to_seldon_messages(): - data = {"seldonMessages": [{"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, - {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}]} + data = { + "seldonMessages": [ + {"data": {"tensor": {"shape": [1, 1], "values": [1]}}}, + {"data": {"tensor": {"shape": [1, 1], "values": [2]}}}, + ] + } requestProto = scu.json_to_seldon_messages(data) assert requestProto.seldonMessages[0].data.tensor.values == [1] assert requestProto.seldonMessages[1].data.tensor.values == [2] @@ -345,10 +333,7 @@ def test_json_to_seldon_messages(): def test_seldon_message_to_json(): arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) dict = scu.seldon_message_to_json(request) @@ -358,10 +343,7 @@ def test_seldon_message_to_json(): def test_get_data_from_proto_tensor(): arr = np.array([1, 2]) datadef = prediction_pb2.DefaultData( - tensor=prediction_pb2.Tensor( - shape=(2, 1), - values=arr - ) + tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr) ) request = prediction_pb2.SeldonMessage(data=datadef) arr: np.ndarray = scu.get_data_from_proto(request) @@ -373,9 +355,7 @@ def test_get_data_from_proto_tensor(): def test_get_data_from_proto_ndarray(): arr = np.array([[1], [2]]) lv = scu.array_to_list_value(arr) - datadef = prediction_pb2.DefaultData( - ndarray=lv - ) + datadef = prediction_pb2.DefaultData(ndarray=lv) request = prediction_pb2.SeldonMessage(data=datadef) 
arr: np.ndarray = scu.get_data_from_proto(request) assert arr.shape == (2, 1) @@ -386,9 +366,7 @@ def test_get_data_from_proto_ndarray(): @skipif_tf_missing def test_get_data_from_proto_tftensor(): arr = np.array([[1], [2]]) - datadef = prediction_pb2.DefaultData( - tftensor=tf.make_tensor_proto(arr) - ) + datadef = prediction_pb2.DefaultData(tftensor=tf.make_tensor_proto(arr)) request = prediction_pb2.SeldonMessage(data=datadef) arr: np.ndarray = scu.get_data_from_proto(request) assert arr.shape == (2, 1) @@ -411,8 +389,7 @@ def test_proto_tftensor_to_array(): names = ["a", "b"] array = np.array([[1, 2], [3, 4]]) datadef = prediction_pb2.DefaultData( - names=names, - tftensor=tf.make_tensor_proto(array) + names=names, tftensor=tf.make_tensor_proto(array) ) array2 = scu.grpc_datadef_to_array(datadef) assert array.shape == array2.shape diff --git a/python/tests/utils.py b/python/tests/utils.py index 233f5367b8..535ab65f59 100644 --- a/python/tests/utils.py +++ b/python/tests/utils.py @@ -1,5 +1,4 @@ import pytest from seldon_core.tf_helper import _TF_MISSING -skipif_tf_missing = pytest.mark.skipif( - _TF_MISSING, reason="tensorflow is not present") +skipif_tf_missing = pytest.mark.skipif(_TF_MISSING, reason="tensorflow is not present") diff --git a/testing/docker/fixed-model/ModelV1.py b/testing/docker/fixed-model/ModelV1.py index 6b995b59e8..337f220c38 100644 --- a/testing/docker/fixed-model/ModelV1.py +++ b/testing/docker/fixed-model/ModelV1.py @@ -1,15 +1,11 @@ - class ModelV1(object): - def __init__(self): print("Initialising") - def predict(self,X,features_names): + def predict(self, X, features_names): print("Predict called") - return [1,2,3,4] + return [1, 2, 3, 4] - def send_feedback(self,features,feature_names,reward,truth): + def send_feedback(self, features, feature_names, reward, truth): print("Send feedback called") return [] - - diff --git a/testing/docker/fixed-model/ModelV2.py b/testing/docker/fixed-model/ModelV2.py index 9a8a7685c6..098d11bb80 100644 --- a/testing/docker/fixed-model/ModelV2.py +++ b/testing/docker/fixed-model/ModelV2.py @@ -1,15 +1,11 @@ - class ModelV2(object): - def __init__(self): print("Initialising") - def predict(self,X,features_names): + def predict(self, X, features_names): print("Predict called") - return [5,6,7,8] + return [5, 6, 7, 8] - def send_feedback(self,features,feature_names,reward,truth): + def send_feedback(self, features, feature_names, reward, truth): print("Send feedback called") return [] - - diff --git a/testing/s2i/python/combiner/MyCombiner.py b/testing/s2i/python/combiner/MyCombiner.py index 8243bf5b59..3ecf5867d2 100644 --- a/testing/s2i/python/combiner/MyCombiner.py +++ b/testing/s2i/python/combiner/MyCombiner.py @@ -1,6 +1,6 @@ import logging -logger = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class MyCombiner(object): @@ -10,5 +10,4 @@ def __init__(self, metrics_ok=True): def aggregate(self, Xs, features_names): print("Combiner aggregate called") logger.info(Xs) - return Xs[0]+1 - + return Xs[0] + 1 diff --git a/testing/s2i/python/model/MyModel.py b/testing/s2i/python/model/MyModel.py index 7dcf6c9382..789cf09481 100644 --- a/testing/s2i/python/model/MyModel.py +++ b/testing/s2i/python/model/MyModel.py @@ -1,13 +1,9 @@ - - - class MyModel(object): def __init__(self, metrics_ok=True): print("Init called") def predict(self, X, features_names): - return X+1 + return X + 1 - def send_feedback(self,features, feature_names, routing, reward, truth): + def send_feedback(self, features, feature_names, routing, 
reward, truth): print("Feedback called") - diff --git a/testing/s2i/python/router/MyRouter.py b/testing/s2i/python/router/MyRouter.py index 4422b9030c..d0ef86cef1 100644 --- a/testing/s2i/python/router/MyRouter.py +++ b/testing/s2i/python/router/MyRouter.py @@ -1,6 +1,3 @@ - - - class MyRouter(object): def __init__(self, metrics_ok=True): print("Init called") @@ -8,6 +5,5 @@ def __init__(self, metrics_ok=True): def route(self, X, features_names): return 0 - def send_feedback(self,features, feature_names, routing, reward, truth): + def send_feedback(self, features, feature_names, routing, reward, truth): print("Feedback called") - diff --git a/testing/s2i/python/transformer/MyTransformer.py b/testing/s2i/python/transformer/MyTransformer.py index 8638c74222..5a6c8f0be8 100644 --- a/testing/s2i/python/transformer/MyTransformer.py +++ b/testing/s2i/python/transformer/MyTransformer.py @@ -1,12 +1,9 @@ - - - class MyTransformer(object): def __init__(self, metrics_ok=True): print("Init called") def transform_input(self, X, features_names): - return X+1 + return X + 1 def transform_output(self, X, features_names): - return X+1 + return X + 1 diff --git a/testing/scripts/conftest.py b/testing/scripts/conftest.py index 81747aff3d..4fa3b25167 100644 --- a/testing/scripts/conftest.py +++ b/testing/scripts/conftest.py @@ -2,11 +2,14 @@ from k8s_utils import * from s2i_utils import * + @pytest.fixture(scope="module") def s2i_python_version(): return do_s2i_python_version() + #### Implementations below + def do_s2i_python_version(): return get_s2i_python_version() diff --git a/testing/scripts/k8s_utils.py b/testing/scripts/k8s_utils.py index 75e7e659e9..ec47cf5e2f 100644 --- a/testing/scripts/k8s_utils.py +++ b/testing/scripts/k8s_utils.py @@ -8,6 +8,7 @@ API_AMBASSADOR = "localhost:8003" + def wait_for_shutdown(deploymentName): ret = run("kubectl get deploy/" + deploymentName, shell=True) while ret.returncode == 0: @@ -16,9 +17,9 @@ def wait_for_shutdown(deploymentName): def get_seldon_version(): - completedProcess = Popen("cat ../../version.txt", shell=True, stdout=subprocess.PIPE) + completedProcess = Popen( + "cat ../../version.txt", shell=True, stdout=subprocess.PIPE + ) output = completedProcess.stdout.readline() - version = output.decode('utf-8').strip() + version = output.decode("utf-8").strip() return version - - diff --git a/testing/scripts/s2i_utils.py b/testing/scripts/s2i_utils.py index 5eecec6c8f..eb51673d2c 100644 --- a/testing/scripts/s2i_utils.py +++ b/testing/scripts/s2i_utils.py @@ -1,9 +1,13 @@ import subprocess -from subprocess import run,Popen +from subprocess import run, Popen + def get_s2i_python_version(): - completedProcess = Popen("cd ../../wrappers/s2i/python && grep 'IMAGE_VERSION=' Makefile | cut -d'=' -f2", shell=True, stdout=subprocess.PIPE) + completedProcess = Popen( + "cd ../../wrappers/s2i/python && grep 'IMAGE_VERSION=' Makefile | cut -d'=' -f2", + shell=True, + stdout=subprocess.PIPE, + ) output = completedProcess.stdout.readline() - version = output.decode('utf-8').rstrip() + version = output.decode("utf-8").rstrip() return version - diff --git a/testing/scripts/seldon_utils.py b/testing/scripts/seldon_utils.py index be569fc19b..be41956a8e 100644 --- a/testing/scripts/seldon_utils.py +++ b/testing/scripts/seldon_utils.py @@ -6,11 +6,14 @@ import numpy as np from k8s_utils import * + def wait_for_rollout(deploymentName): ret = run("kubectl rollout status -n test1 deploy/" + deploymentName, shell=True) while ret.returncode > 0: time.sleep(1) - ret = run("kubectl 
rollout status -n test1 deploy/" + deploymentName, shell=True) + ret = run( + "kubectl rollout status -n test1 deploy/" + deploymentName, shell=True + ) def rest_request(model, namespace): @@ -43,77 +46,151 @@ def initial_rest_request(model, namespace): return r -def create_random_data(data_size,rows=1): - shape = [rows,data_size] - arr = np.random.rand(rows*data_size) - return (shape,arr) +def create_random_data(data_size, rows=1): + shape = [rows, data_size] + arr = np.random.rand(rows * data_size) + return (shape, arr) -@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=5) -def rest_request_ambassador(deploymentName,namespace,endpoint="localhost:8003",data_size=5,rows=1,data=None): +@retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_attempt_number=5, +) +def rest_request_ambassador( + deploymentName, namespace, endpoint="localhost:8003", data_size=5, rows=1, data=None +): if data is None: - shape, arr = create_random_data(data_size,rows) + shape, arr = create_random_data(data_size, rows) else: shape = data.shape arr = data.flatten() - payload = {"data":{"names":["a","b"],"tensor":{"shape":shape,"values":arr.tolist()}}} + payload = { + "data": { + "names": ["a", "b"], + "tensor": {"shape": shape, "values": arr.tolist()}, + } + } if namespace is None: response = requests.post( - "http://"+endpoint+"/seldon/"+deploymentName+"/api/v0.1/predictions", - json=payload) + "http://" + + endpoint + + "/seldon/" + + deploymentName + + "/api/v0.1/predictions", + json=payload, + ) else: response = requests.post( - "http://"+endpoint+"/seldon/"+namespace+"/"+deploymentName+"/api/v0.1/predictions", - json=payload) + "http://" + + endpoint + + "/seldon/" + + namespace + + "/" + + deploymentName + + "/api/v0.1/predictions", + json=payload, + ) return response -@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=5) -def rest_request_ambassador_auth(deploymentName,namespace,username,password,endpoint="localhost:8003",data_size=5,rows=1,data=None): + +@retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_attempt_number=5, +) +def rest_request_ambassador_auth( + deploymentName, + namespace, + username, + password, + endpoint="localhost:8003", + data_size=5, + rows=1, + data=None, +): if data is None: - shape, arr = create_random_data(data_size,rows) + shape, arr = create_random_data(data_size, rows) else: shape = data.shape arr = data.flatten() - payload = {"data":{"names":["a","b"],"tensor":{"shape":shape,"values":arr.tolist()}}} + payload = { + "data": { + "names": ["a", "b"], + "tensor": {"shape": shape, "values": arr.tolist()}, + } + } if namespace is None: response = requests.post( - "http://"+endpoint+"/seldon/"+deploymentName+"/api/v0.1/predictions", + "http://" + + endpoint + + "/seldon/" + + deploymentName + + "/api/v0.1/predictions", json=payload, - auth=HTTPBasicAuth(username, password)) + auth=HTTPBasicAuth(username, password), + ) else: response = requests.post( - "http://"+endpoint+"/seldon/"+namespace+"/"+deploymentName+"/api/v0.1/predictions", + "http://" + + endpoint + + "/seldon/" + + namespace + + "/" + + deploymentName + + "/api/v0.1/predictions", json=payload, - auth=HTTPBasicAuth(username, password)) + auth=HTTPBasicAuth(username, password), + ) return response -@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=5) -def 
grpc_request_ambassador(deploymentName,namespace,endpoint="localhost:8004",data_size=5,rows=1,data=None): + +@retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_attempt_number=5, +) +def grpc_request_ambassador( + deploymentName, namespace, endpoint="localhost:8004", data_size=5, rows=1, data=None +): if data is None: - shape, arr = create_random_data(data_size,rows) + shape, arr = create_random_data(data_size, rows) else: shape = data.shape arr = data.flatten() datadef = prediction_pb2.DefaultData( - tensor = prediction_pb2.Tensor( - shape = shape, - values = arr - ) - ) - request = prediction_pb2.SeldonMessage(data = datadef) + tensor=prediction_pb2.Tensor(shape=shape, values=arr) + ) + request = prediction_pb2.SeldonMessage(data=datadef) channel = grpc.insecure_channel(endpoint) stub = prediction_pb2_grpc.SeldonStub(channel) if namespace is None: - metadata = [('seldon',deploymentName)] + metadata = [("seldon", deploymentName)] else: - metadata = [('seldon',deploymentName),('namespace',namespace)] - response = stub.Predict(request=request,metadata=metadata) + metadata = [("seldon", deploymentName), ("namespace", namespace)] + response = stub.Predict(request=request, metadata=metadata) return response -def grpc_request_ambassador2(deploymentName,namespace,endpoint="localhost:8004",data_size=5,rows=1,data=None): + +def grpc_request_ambassador2( + deploymentName, namespace, endpoint="localhost:8004", data_size=5, rows=1, data=None +): try: - return grpc_request_ambassador(deploymentName,namespace,endpoint=endpoint,data_size=data_size,rows=rows,data=data) + return grpc_request_ambassador( + deploymentName, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) except: print("Warning - caught exception") - return grpc_request_ambassador(deploymentName,namespace,endpoint=endpoint,data_size=data_size,rows=rows,data=data) - + return grpc_request_ambassador( + deploymentName, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) diff --git a/testing/scripts/test_bad_graphs.py b/testing/scripts/test_bad_graphs.py index d58766a337..21fd3ce46c 100644 --- a/testing/scripts/test_bad_graphs.py +++ b/testing/scripts/test_bad_graphs.py @@ -2,9 +2,15 @@ import json from seldon_utils import * + def wait_for_status(name): for attempts in range(7): - completedProcess = run("kubectl get sdep "+name+" -o json -n seldon", shell=True, check=True, stdout=subprocess.PIPE) + completedProcess = run( + "kubectl get sdep " + name + " -o json -n seldon", + shell=True, + check=True, + stdout=subprocess.PIPE, + ) jStr = completedProcess.stdout j = json.loads(jStr) if "status" in j: @@ -13,14 +19,21 @@ def wait_for_status(name): print("Failed to find status - sleeping") time.sleep(5) -class TestBadGraphs(object): +class TestBadGraphs(object): def test_duplicate_predictor_name(self): - ret = run("kubectl apply -f ../resources/bad_duplicate_predictor_name.json -n seldon", shell=True, check=False) + ret = run( + "kubectl apply -f ../resources/bad_duplicate_predictor_name.json -n seldon", + shell=True, + check=False, + ) assert ret.returncode == 1 # Name in graph and that in PodTemplateSpec don't match def test_model_name_mismatch(self): - ret = run("kubectl apply -f ../resources/bad_name_mismatch.json -n seldon", shell=True, check=False) + ret = run( + "kubectl apply -f ../resources/bad_name_mismatch.json -n seldon", + shell=True, + check=False, + ) assert ret.returncode == 1 - diff --git 
a/testing/scripts/test_helm_charts_clusterwide.py b/testing/scripts/test_helm_charts_clusterwide.py
index 027838f91b..6372465272 100644
--- a/testing/scripts/test_helm_charts_clusterwide.py
+++ b/testing/scripts/test_helm_charts_clusterwide.py
@@ -2,12 +2,14 @@
 from seldon_utils import *
 from k8s_utils import *
 
+
 def wait_for_shutdown(deploymentName):
     ret = run("kubectl get -n test1 deploy/" + deploymentName, shell=True)
     while ret.returncode == 0:
         time.sleep(1)
         ret = run("kubectl get -n test1 deploy/" + deploymentName, shell=True)
 
+
 class TestClusterWide(object):
 
     # Test single model helm script with 4 API methods
@@ -15,7 +17,9 @@ def test_single_model(self):
         run("helm delete mymodel --purge", shell=True)
         run(
             "helm install ../../helm-charts/seldon-single-model --name mymodel --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1",
-            shell=True, check=True)
+            shell=True,
+            check=True,
+        )
         wait_for_rollout("mymodel-mymodel-7cd068f")
         initial_rest_request("mymodel", "test1")
         print("Test Ambassador REST gateway")
@@ -33,7 +37,9 @@ def test_abtest_model(self):
         run("helm delete myabtest --purge", shell=True)
         run(
             "helm install ../../helm-charts/seldon-abtest --name myabtest --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1",
-            shell=True, check=True)
+            shell=True,
+            check=True,
+        )
         wait_for_rollout("myabtest-myabtest-41de5b8")
         wait_for_rollout("myabtest-myabtest-df66c5c")
         initial_rest_request("myabtest", "test1")
@@ -43,9 +49,11 @@ def test_abtest_model(self):
         assert r.status_code == 200
         assert len(r.json()["data"]["tensor"]["values"]) == 1
         print("Test Ambassador gRPC gateway")
-        print("WARNING SKIPPING FLAKY AMBASSADOR TEST UNTIL AMBASSADOR GRPC ISSUE FIXED..")
-        #r = grpc_request_ambassador2("myabtest", "test1", API_AMBASSADOR)
-        #print(r)
+        print(
+            "WARNING SKIPPING FLAKY AMBASSADOR TEST UNTIL AMBASSADOR GRPC ISSUE FIXED.."
+        )
+        # r = grpc_request_ambassador2("myabtest", "test1", API_AMBASSADOR)
+        # print(r)
         run("helm delete myabtest --purge", shell=True)
 
     # Test MAB Test model helm script with 4 API methods
@@ -53,7 +61,9 @@ def test_mab_model(self):
         run("helm delete mymab --purge", shell=True)
         run(
             "helm install ../../helm-charts/seldon-mab --name mymab --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1",
-            shell=True, check=True)
+            shell=True,
+            check=True,
+        )
         wait_for_rollout("mymab-mymab-41de5b8")
         wait_for_rollout("mymab-mymab-b8038b2")
         wait_for_rollout("mymab-mymab-df66c5c")
@@ -64,7 +74,9 @@ def test_mab_model(self):
         assert r.status_code == 200
         assert len(r.json()["data"]["tensor"]["values"]) == 1
         print("Test Ambassador gRPC gateway")
-        print("WARNING SKIPPING FLAKY AMBASSADOR TEST UNTIL AMBASSADOR GRPC ISSUE FIXED..")
-        #r = grpc_request_ambassador2("mymab", "test1", API_AMBASSADOR)
-        #print(r)
+        print(
+            "WARNING SKIPPING FLAKY AMBASSADOR TEST UNTIL AMBASSADOR GRPC ISSUE FIXED.."
+ ) + # r = grpc_request_ambassador2("mymab", "test1", API_AMBASSADOR) + # print(r) run("helm delete mymab --purge", shell=True) diff --git a/testing/scripts/test_prepackaged_servers.py b/testing/scripts/test_prepackaged_servers.py index 375e619d40..dfb9a76061 100644 --- a/testing/scripts/test_prepackaged_servers.py +++ b/testing/scripts/test_prepackaged_servers.py @@ -3,59 +3,77 @@ from seldon_utils import * from seldon_core.seldon_client import SeldonClient + def wait_for_status(name): for attempts in range(7): - completedProcess = run("kubectl get sdep "+name+" -o json -n seldon", shell=True, check=True, stdout=subprocess.PIPE) + completedProcess = run( + "kubectl get sdep " + name + " -o json -n seldon", + shell=True, + check=True, + stdout=subprocess.PIPE, + ) jStr = completedProcess.stdout j = json.loads(jStr) - if "status" in j and j['status'] == "Available": + if "status" in j and j["status"] == "Available": return j else: print("Failed to find status - sleeping") time.sleep(5) + def wait_for_rollout(deploymentName): - ret = run("kubectl rollout status deploy/"+deploymentName, shell=True) + ret = run("kubectl rollout status deploy/" + deploymentName, shell=True) while ret.returncode > 0: time.sleep(1) - ret = run("kubectl rollout status deploy/"+deploymentName, shell=True) + ret = run("kubectl rollout status deploy/" + deploymentName, shell=True) + class TestPrepack(object): # Test prepackaged server for sklearn def test_sklearn(self): run("kubectl delete sdep --all", shell=True) - run("kubectl apply -f ../../servers/sklearnserver/samples/iris.yaml", shell=True, check=True) + run( + "kubectl apply -f ../../servers/sklearnserver/samples/iris.yaml", + shell=True, + check=True, + ) wait_for_rollout("iris-default-4903e3c") wait_for_status("sklearn") print("Initial request") - sc = SeldonClient(deployment_name="sklearn",namespace="seldon") - r = sc.predict(gateway="ambassador",transport="rest",shape=(1,4)) + sc = SeldonClient(deployment_name="sklearn", namespace="seldon") + r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 4)) assert r.success print("Success for test_prepack_sklearn") - # Test prepackaged server for tfserving def test_tfserving(self): run("kubectl delete sdep --all", shell=True) - run("kubectl apply -f ../../servers/tfserving/samples/mnist_rest.yaml", shell=True, check=True) + run( + "kubectl apply -f ../../servers/tfserving/samples/mnist_rest.yaml", + shell=True, + check=True, + ) wait_for_rollout("mnist-default-725903e") wait_for_status("tfserving") print("Initial request") - sc = SeldonClient(deployment_name="tfserving",namespace="seldon") - r = sc.predict(gateway="ambassador",transport="rest",shape=(1,784)) + sc = SeldonClient(deployment_name="tfserving", namespace="seldon") + r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 784)) assert r.success print("Success for test_prepack_tfserving") - # Test prepackaged server for xgboost def test_xgboost(self): run("kubectl delete sdep --all", shell=True) - run("kubectl apply -f ../../servers/xgboostserver/samples/iris.yaml", shell=True, check=True) + run( + "kubectl apply -f ../../servers/xgboostserver/samples/iris.yaml", + shell=True, + check=True, + ) wait_for_rollout("iris-default-af1783b") wait_for_status("xgboost") print("Initial request") - sc = SeldonClient(deployment_name="xgboost",namespace="seldon") - r = sc.predict(gateway="ambassador",transport="rest",shape=(1,4)) + sc = SeldonClient(deployment_name="xgboost", namespace="seldon") + r = sc.predict(gateway="ambassador", 
transport="rest", shape=(1, 4)) assert r.success print("Success for test_prepack_xgboost") diff --git a/testing/scripts/test_rolling_updates.py b/testing/scripts/test_rolling_updates.py index 6493cf41a0..8f76c3fad9 100644 --- a/testing/scripts/test_rolling_updates.py +++ b/testing/scripts/test_rolling_updates.py @@ -1,17 +1,19 @@ from seldon_utils import * from k8s_utils import * + def wait_for_shutdown(deploymentName): - ret = run("kubectl get deploy/"+deploymentName, shell=True) + ret = run("kubectl get deploy/" + deploymentName, shell=True) while ret.returncode == 0: time.sleep(1) - ret = run("kubectl get deploy/"+deploymentName, shell=True) + ret = run("kubectl get deploy/" + deploymentName, shell=True) + def wait_for_rollout(deploymentName): - ret = run("kubectl rollout status deploy/"+deploymentName, shell=True) + ret = run("kubectl rollout status deploy/" + deploymentName, shell=True) while ret.returncode > 0: time.sleep(1) - ret = run("kubectl rollout status deploy/"+deploymentName, shell=True) + ret = run("kubectl rollout status deploy/" + deploymentName, shell=True) class TestRollingHttp(object): @@ -23,9 +25,9 @@ def test_rolling_update1(self): run("kubectl apply -f ../resources/graph1.json", shell=True, check=True) wait_for_rollout("mymodel-mymodel-e2eb561") print("Initial request") - r = initial_rest_request("mymodel","seldon") + r = initial_rest_request("mymodel", "seldon") assert r.status_code == 200 - assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] + assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] run("kubectl apply -f ../resources/graph2.json", shell=True, check=True) i = 0 for i in range(100): @@ -33,8 +35,18 @@ def test_rolling_update1(self): assert r.status_code == 200 res = r.json() print(res) - assert ((res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.2" and res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0])) - if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]): + assert ( + res["meta"]["requestPath"]["complex-model"] + == "seldonio/fixed-model:0.1" + and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] + ) or ( + res["meta"]["requestPath"]["complex-model"] + == "seldonio/fixed-model:0.2" + and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + ) + if (not r.status_code == 200) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + ): break time.sleep(1) assert i < 100 @@ -47,9 +59,9 @@ def test_rolling_update2(self): run("kubectl apply -f ../resources/graph1.json", shell=True, check=True) wait_for_rollout("mymodel-mymodel-e2eb561") print("Initial request") - r = initial_rest_request("mymodel","seldon") + r = initial_rest_request("mymodel", "seldon") assert r.status_code == 200 - assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] + assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] run("kubectl apply -f ../resources/graph3.json", shell=True, check=True) i = 0 for i in range(100): @@ -57,8 +69,19 @@ def test_rolling_update2(self): assert r.status_code == 200 res = r.json() print(res) - assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model2"] == "seldonio/fixed-model:0.2" and 
res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0])) - if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]): + assert ( + "complex-model" in res["meta"]["requestPath"] + and res["meta"]["requestPath"]["complex-model"] + == "seldonio/fixed-model:0.1" + and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] + ) or ( + res["meta"]["requestPath"]["complex-model2"] + == "seldonio/fixed-model:0.2" + and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + ) + if (not r.status_code == 200) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + ): break time.sleep(1) assert i < 100 @@ -71,9 +94,9 @@ def test_rolling_update3(self): run("kubectl apply -f ../resources/graph1.json", shell=True, check=True) wait_for_rollout("mymodel-mymodel-e2eb561") print("Initial request") - r = initial_rest_request("mymodel","seldon") + r = initial_rest_request("mymodel", "seldon") assert r.status_code == 200 - assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] + assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] run("kubectl apply -f ../resources/graph4.json", shell=True, check=True) i = 0 for i in range(50): @@ -81,7 +104,14 @@ def test_rolling_update3(self): assert r.status_code == 200 res = r.json() print(res) - assert ((res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0])) + assert res["meta"]["requestPath"][ + "complex-model" + ] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [ + 1.0, + 2.0, + 3.0, + 4.0, + ] time.sleep(1) assert i == 49 print("Success for test_rolling_update3") @@ -93,9 +123,9 @@ def test_rolling_update4(self): run("kubectl apply -f ../resources/graph1.json", shell=True, check=True) wait_for_rollout("mymodel-mymodel-e2eb561") print("Initial request") - r = initial_rest_request("mymodel","seldon") + r = initial_rest_request("mymodel", "seldon") assert r.status_code == 200 - assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] + assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] run("kubectl apply -f ../resources/graph5.json", shell=True, check=True) i = 0 for i in range(50): @@ -103,7 +133,16 @@ def test_rolling_update4(self): assert r.status_code == 200 res = r.json() print(res) - assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1")) + assert ( + "complex-model" in res["meta"]["requestPath"] + and res["meta"]["requestPath"]["complex-model"] + == "seldonio/fixed-model:0.1" + and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] + ) or ( + res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1" + and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] + and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1" + ) if (not r.status_code == 200) or ("model1" in res["meta"]["requestPath"]): break time.sleep(1) @@ -117,9 +156,9 @@ def test_rolling_update5(self): run("kubectl apply -f ../resources/graph1.json", shell=True, check=True) wait_for_rollout("mymodel-mymodel-e2eb561") print("Initial request") - r = initial_rest_request("mymodel","seldon") + r = initial_rest_request("mymodel", "seldon") assert r.status_code == 
200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph6.json", shell=True, check=True)
         i = 0
         for i in range(50):
@@ -128,14 +167,24 @@ def test_rolling_update5(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.2" and res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]))
-            if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]):
+            assert (
+                "complex-model" in res["meta"]["requestPath"]
+                and res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+            ) or (
+                res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.2"
+                and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            )
+            if (not r.status_code == 200) or (
+                res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            ):
                 break
             time.sleep(1)
         assert i < 100
         print("Success for test_rolling_update5")
 
-
     # Test updating a model with a new image version as the only change
     def test_rolling_update6(self):
         run("kubectl delete sdep --all", shell=True)
@@ -145,9 +194,9 @@ def test_rolling_update6(self):
         wait_for_rollout("mymodel-mymodel-svc-orch-8e2a24b")
         wait_for_rollout("mymodel-mymodel-e2eb561")
         print("Initial request")
-        r = initial_rest_request("mymodel","seldon")
+        r = initial_rest_request("mymodel", "seldon")
         assert r.status_code == 200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph2svc.json", shell=True, check=True)
         i = 0
         for i in range(100):
@@ -156,8 +205,18 @@ def test_rolling_update6(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert ((res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.2" and res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]))
-            if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]):
+            assert (
+                res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+            ) or (
+                res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.2"
+                and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            )
+            if (not r.status_code == 200) or (
+                res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            ):
                 break
             time.sleep(1)
         assert i < 100
@@ -172,9 +231,9 @@ def test_rolling_update7(self):
         wait_for_rollout("mymodel-mymodel-svc-orch-8e2a24b")
         wait_for_rollout("mymodel-mymodel-e2eb561")
         print("Initial request")
-        r = initial_rest_request("mymodel","seldon")
+        r = initial_rest_request("mymodel", "seldon")
         assert r.status_code == 200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph3svc.json", shell=True, check=True)
         i = 0
         for i in range(100):
@@ -183,8 +242,19 @@ def test_rolling_update7(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model2"] == "seldonio/fixed-model:0.2" and res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]))
-            if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]):
+            assert (
+                "complex-model" in res["meta"]["requestPath"]
+                and res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+            ) or (
+                res["meta"]["requestPath"]["complex-model2"]
+                == "seldonio/fixed-model:0.2"
+                and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            )
+            if (not r.status_code == 200) or (
+                res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            ):
                 break
             time.sleep(1)
         assert i < 100
@@ -198,9 +268,9 @@ def test_rolling_update8(self):
         run("kubectl apply -f ../resources/graph1svc.json", shell=True, check=True)
         wait_for_rollout("mymodel-mymodel-svc-orch-8e2a24b")
         wait_for_rollout("mymodel-mymodel-e2eb561")
-        r = initial_rest_request("mymodel","seldon")
+        r = initial_rest_request("mymodel", "seldon")
         assert r.status_code == 200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph4svc.json", shell=True, check=True)
         i = 0
         for i in range(50):
@@ -209,7 +279,14 @@ def test_rolling_update8(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert ((res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]))
+            assert res["meta"]["requestPath"][
+                "complex-model"
+            ] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [
+                1.0,
+                2.0,
+                3.0,
+                4.0,
+            ]
             time.sleep(1)
         assert i == 49
         print("Success for test_rolling_update8")
@@ -222,9 +299,9 @@ def test_rolling_update9(self):
         run("kubectl apply -f ../resources/graph1svc.json", shell=True, check=True)
         wait_for_rollout("mymodel-mymodel-svc-orch-8e2a24b")
         wait_for_rollout("mymodel-mymodel-e2eb561")
-        r = initial_rest_request("mymodel","seldon")
+        r = initial_rest_request("mymodel", "seldon")
         assert r.status_code == 200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph5svc.json", shell=True, check=True)
         i = 0
         for i in range(50):
@@ -233,7 +310,16 @@ def test_rolling_update9(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0] and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1"))
+            assert (
+                "complex-model" in res["meta"]["requestPath"]
+                and res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+            ) or (
+                res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+                and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1"
+            )
             if (not r.status_code == 200) or ("model1" in res["meta"]["requestPath"]):
                 break
             time.sleep(1)
@@ -248,9 +334,9 @@ def test_rolling_update10(self):
         run("kubectl apply -f ../resources/graph1svc.json", shell=True, check=True)
         wait_for_rollout("mymodel-mymodel-svc-orch-8e2a24b")
         wait_for_rollout("mymodel-mymodel-e2eb561")
-        r = initial_rest_request("mymodel","seldon")
+        r = initial_rest_request("mymodel", "seldon")
         assert r.status_code == 200
-        assert r.json()["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]
+        assert r.json()["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
         run("kubectl apply -f ../resources/graph6svc.json", shell=True, check=True)
         i = 0
         for i in range(50):
@@ -259,8 +345,19 @@ def test_rolling_update10(self):
             assert r.status_code == 200
             res = r.json()
             print(res)
-            assert (("complex-model" in res["meta"]["requestPath"] and res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [1.0,2.0,3.0,4.0]) or (res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.2" and res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]))
-            if (not r.status_code == 200) or (res["data"]["tensor"]["values"] == [5.0,6.0,7.0,8.0]):
+            assert (
+                "complex-model" in res["meta"]["requestPath"]
+                and res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.1"
+                and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]
+            ) or (
+                res["meta"]["requestPath"]["complex-model"]
+                == "seldonio/fixed-model:0.2"
+                and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            )
+            if (not r.status_code == 200) or (
+                res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0]
+            ):
                 break
             time.sleep(1)
         assert i < 100
diff --git a/testing/scripts/test_s2i_python.py b/testing/scripts/test_s2i_python.py
index a58c05f064..d41a91037c 100644
--- a/testing/scripts/test_s2i_python.py
+++ b/testing/scripts/test_s2i_python.py
@@ -11,8 +11,11 @@
 
 
 def create_s2I_image(s2i_python_version, component_type, api_type):
-    cmd = S2I_CREATE.replace("#TYPE#", component_type).replace("#API#", api_type).replace("#VERSION#",
-                                                                                          s2i_python_version)
+    cmd = (
+        S2I_CREATE.replace("#TYPE#", component_type)
+        .replace("#API#", api_type)
+        .replace("#VERSION#", s2i_python_version)
+    )
     print(cmd)
     run(cmd, shell=True, check=True)
 
@@ -35,7 +38,6 @@ def create_push_s2i_image(s2i_python_version, component_type, api_type):
 
 @pytest.mark.usefixtures("s2i_python_version")
 class TestPythonS2i(object):
-
     def test_build_router_rest(self, s2i_python_version):
         create_s2I_image(s2i_python_version, "router", "rest")
         img = get_image_name("router", "rest")
@@ -103,7 +105,6 @@ def wait_for_rollout(deploymentName):
 
 @pytest.mark.usefixtures("s2i_python_version")
 class TestPythonS2iK8s(object):
-
     def test_model_rest(self, s2i_python_version):
         tester = S2IK8S()
         tester.test_model_rest(s2i_python_version)
@@ -126,11 +127,14 @@ def test_combiner_rest(self, s2i_python_version):
 
 
 class S2IK8S(object):
-
     def test_model_rest(self, s2i_python_version):
         run("kubectl delete sdep --all", shell=True)
         create_push_s2i_image(s2i_python_version, "model", "rest")
-        run("kubectl apply -f ../resources/s2i_python_model.json", shell=True, check=True)
+        run(
+            "kubectl apply -f ../resources/s2i_python_model.json",
+            shell=True,
+            check=True,
+        )
         wait_for_rollout("mymodel-mymodel-8715075")
         r = initial_rest_request("mymodel", "seldon")
         arr = np.array([[1, 2, 3]])
@@ -145,7 +149,11 @@ def test_model_rest(self, s2i_python_version):
     def test_input_transformer_rest(self, s2i_python_version):
         run("kubectl delete sdep --all", shell=True)
create_push_s2i_image(s2i_python_version, "transformer", "rest") - run("kubectl apply -f ../resources/s2i_python_transformer.json", shell=True, check=True) + run( + "kubectl apply -f ../resources/s2i_python_transformer.json", + shell=True, + check=True, + ) wait_for_rollout("mytrans-mytrans-1f278ae") r = initial_rest_request("mytrans", "seldon") arr = np.array([[1, 2, 3]]) @@ -160,7 +168,11 @@ def test_input_transformer_rest(self, s2i_python_version): def test_output_transformer_rest(self, s2i_python_version): run("kubectl delete sdep --all", shell=True) create_push_s2i_image(s2i_python_version, "transformer", "rest") - run("kubectl apply -f ../resources/s2i_python_output_transformer.json", shell=True, check=True) + run( + "kubectl apply -f ../resources/s2i_python_output_transformer.json", + shell=True, + check=True, + ) wait_for_rollout("mytrans-mytrans-52996cb") r = initial_rest_request("mytrans", "seldon") arr = np.array([[1, 2, 3]]) @@ -176,7 +188,11 @@ def test_router_rest(self, s2i_python_version): run("kubectl delete sdep --all", shell=True) create_push_s2i_image(s2i_python_version, "model", "rest") create_push_s2i_image(s2i_python_version, "router", "rest") - run("kubectl apply -f ../resources/s2i_python_router.json", shell=True, check=True) + run( + "kubectl apply -f ../resources/s2i_python_router.json", + shell=True, + check=True, + ) wait_for_rollout("myrouter-myrouter-340ed69") r = initial_rest_request("myrouter", "seldon") arr = np.array([[1, 2, 3]]) @@ -192,7 +208,11 @@ def test_combiner_rest(self, s2i_python_version): run("kubectl delete sdep --all", shell=True) create_push_s2i_image(s2i_python_version, "model", "rest") create_push_s2i_image(s2i_python_version, "combiner", "rest") - run("kubectl apply -f ../resources/s2i_python_combiner.json", shell=True, check=True) + run( + "kubectl apply -f ../resources/s2i_python_combiner.json", + shell=True, + check=True, + ) wait_for_rollout("mycombiner-mycombiner-acc7c4d") r = initial_rest_request("mycombiner", "seldon") arr = np.array([[1, 2, 3]]) @@ -203,4 +223,3 @@ def test_combiner_rest(self, s2i_python_version): assert r.json()["data"]["tensor"]["shape"] == [1, 3] assert r.json()["data"]["tensor"]["values"] == [3, 4, 5] run("kubectl delete sdep --all", shell=True) -