diff --git a/automl/beta/automl_vision_create_model_test.py b/automl/beta/automl_vision_create_model_test.py
deleted file mode 100644
index bcb6c323e1e4..000000000000
--- a/automl/beta/automl_vision_create_model_test.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import os
-
-from google.cloud import automl_v1beta1 as automl
-import pytest
-
-project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
-compute_region = "us-central1"
-
-
-@pytest.mark.skip(reason="creates too many models")
-def test_model_create_status_delete(capsys):
-    # create model
-    client = automl.AutoMlClient()
-    model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    project_location = client.location_path(project_id, compute_region)
-    my_model = {
-        "display_name": model_name,
-        "dataset_id": "3946265060617537378",
-        "image_classification_model_metadata": {"train_budget": 24},
-    }
-    response = client.create_model(project_location, my_model)
-    operation_name = response.operation.name
-    assert operation_name
-
-    # cancel operation
-    response.cancel()
diff --git a/automl/beta/automl_vision_model.py b/automl/beta/automl_vision_model.py
deleted file mode 100755
index 04aa4c9476e4..000000000000
--- a/automl/beta/automl_vision_model.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This application demonstrates how to perform basic operations on model
-with the Google AutoML Vision API.
-
-For more information, the documentation at
-https://cloud.google.com/vision/automl/docs.
-"""
-
-import argparse
-import os
-
-
-def create_model(
-    project_id, compute_region, dataset_id, model_name, train_budget=24
-):
-    """Create a model."""
-    # [START automl_vision_create_model]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # dataset_id = 'DATASET_ID_HERE'
-    # model_name = 'MODEL_NAME_HERE'
-    # train_budget = integer amount for maximum cost of model
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # A resource that represents Google Cloud Platform location.
-    project_location = client.location_path(project_id, compute_region)
-
-    # Set model name and model metadata for the image dataset.
-    my_model = {
-        "display_name": model_name,
-        "dataset_id": dataset_id,
-        "image_classification_model_metadata": {"train_budget": train_budget}
-        if train_budget
-        else {},
-    }
-
-    # Create a model with the model metadata in the region.
-    response = client.create_model(project_location, my_model)
-
-    print("Training operation name: {}".format(response.operation.name))
-    print("Training started...")
-
-    # [END automl_vision_create_model]
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-    )
-    subparsers = parser.add_subparsers(dest="command")
-
-    create_model_parser = subparsers.add_parser(
-        "create_model", help=create_model.__doc__
-    )
-    create_model_parser.add_argument("dataset_id")
-    create_model_parser.add_argument("model_name")
-    create_model_parser.add_argument(
-        "train_budget", type=int, nargs="?", default=0
-    )
-
-    project_id = os.environ["PROJECT_ID"]
-    compute_region = os.environ["REGION_NAME"]
-
-    args = parser.parse_args()
-
-    if args.command == "create_model":
-        create_model(
-            project_id,
-            compute_region,
-            args.dataset_id,
-            args.model_name,
-            args.train_budget,
-        )
diff --git a/automl/beta/requirements.txt b/automl/beta/requirements.txt
index d2157cd180ba..94f5d6c85200 100644
--- a/automl/beta/requirements.txt
+++ b/automl/beta/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.0.0
+google-cloud-automl==2.1.0
diff --git a/automl/snippets/automl_translation_dataset.py b/automl/snippets/automl_translation_dataset.py
index e1dd739ad935..674c2c7c485a 100755
--- a/automl/snippets/automl_translation_dataset.py
+++ b/automl/snippets/automl_translation_dataset.py
@@ -25,138 +25,6 @@
 import os
 
 
-def create_dataset(project_id, compute_region, dataset_name, source, target):
-    """Create a dataset."""
-    # [START automl_translate_create_dataset]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # dataset_name = 'DATASET_NAME_HERE'
-    # source = 'LANGUAGE_CODE_OF_SOURCE_LANGUAGE'
-    # target = 'LANGUAGE_CODE_OF_TARGET_LANGUAGE'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # A resource that represents Google Cloud Platform location.
-    project_location = f"projects/{project_id}/locations/{compute_region}"
-
-    # Specify the source and target language.
-    dataset_metadata = {
-        "source_language_code": source,
-        "target_language_code": target,
-    }
-    # Set dataset name and dataset metadata
-    my_dataset = {
-        "display_name": dataset_name,
-        "translation_dataset_metadata": dataset_metadata,
-    }
-
-    # Create a dataset with the dataset metadata in the region.
-    dataset = client.create_dataset(parent=project_location, dataset=my_dataset)
-
-    # Display the dataset information
-    print("Dataset name: {}".format(dataset.name))
-    print("Dataset id: {}".format(dataset.name.split("/")[-1]))
-    print("Dataset display name: {}".format(dataset.display_name))
-    print("Translation dataset Metadata:")
-    print(
-        "\tsource_language_code: {}".format(
-            dataset.translation_dataset_metadata.source_language_code
-        )
-    )
-    print(
-        "\ttarget_language_code: {}".format(
-            dataset.translation_dataset_metadata.target_language_code
-        )
-    )
-    print("Dataset create time: {}".format(dataset.create_time))
-
-    # [END automl_translate_create_dataset]
-
-
-def list_datasets(project_id, compute_region, filter_):
-    """List Datasets."""
-    # [START automl_translate_list_datasets]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # filter_ = 'filter expression here'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # A resource that represents Google Cloud Platform location.
-    project_location = f"projects/{project_id}/locations/{compute_region}"
-
-    # List all the datasets available in the region by applying filter.
-    request = automl.ListDatasetsRequest(parent=project_location, filter=filter_)
-    response = client.list_datasets(request=request)
-
-    print("List of datasets:")
-    for dataset in response:
-        # Display the dataset information
-        print("Dataset name: {}".format(dataset.name))
-        print("Dataset id: {}".format(dataset.name.split("/")[-1]))
-        print("Dataset display name: {}".format(dataset.display_name))
-        print("Translation dataset metadata:")
-        print(
-            "\tsource_language_code: {}".format(
-                dataset.translation_dataset_metadata.source_language_code
-            )
-        )
-        print(
-            "\ttarget_language_code: {}".format(
-                dataset.translation_dataset_metadata.target_language_code
-            )
-        )
-        print("Dataset create time: {}".format(dataset.create_time))
-
-    # [END automl_translate_list_datasets]
-
-
-def get_dataset(project_id, compute_region, dataset_id):
-    """Get the dataset."""
-    # [START automl_translate_get_dataset]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # dataset_id = 'DATASET_ID_HERE'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # Get the full path of the dataset
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id
-    )
-
-    # Get complete detail of the dataset.
-    dataset = client.get_dataset(name=dataset_full_id)
-
-    # Display the dataset information
-    print("Dataset name: {}".format(dataset.name))
-    print("Dataset id: {}".format(dataset.name.split("/")[-1]))
-    print("Dataset display name: {}".format(dataset.display_name))
-    print("Translation dataset metadata:")
-    print(
-        "\tsource_language_code: {}".format(
-            dataset.translation_dataset_metadata.source_language_code
-        )
-    )
-    print(
-        "\ttarget_language_code: {}".format(
-            dataset.translation_dataset_metadata.target_language_code
-        )
-    )
-    print("Dataset create time: {}".format(dataset.create_time))
-
-    # [END automl_translate_get_dataset]
-
-
 def import_data(project_id, compute_region, dataset_id, path):
     """Import sentence pairs to the dataset."""
     # [START automl_translate_import_data]
@@ -171,9 +39,7 @@ def import_data(project_id, compute_region, dataset_id, path):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id
-    )
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Get the multiple Google Cloud Storage URIs
     input_uris = path.split(",")
@@ -202,9 +68,7 @@ def delete_dataset(project_id, compute_region, dataset_id):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id
-    )
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Delete a dataset.
     response = client.delete_dataset(name=dataset_full_id)
@@ -222,21 +86,7 @@ def delete_dataset(project_id, compute_region, dataset_id):
     )
     subparsers = parser.add_subparsers(dest="command")
 
-    create_dataset_parser = subparsers.add_parser(
-        "create_dataset", help=create_dataset.__doc__
-    )
-    create_dataset_parser.add_argument("dataset_name")
-    create_dataset_parser.add_argument("source")
-    create_dataset_parser.add_argument("target")
-
-    list_datasets_parser = subparsers.add_parser(
-        "list_datasets", help=list_datasets.__doc__
-    )
-    list_datasets_parser.add_argument("filter", nargs="?", default="")
-
-    import_data_parser = subparsers.add_parser(
-        "import_data", help=import_data.__doc__
-    )
+    import_data_parser = subparsers.add_parser("import_data", help=import_data.__doc__)
     import_data_parser.add_argument("dataset_id")
     import_data_parser.add_argument("path")
 
@@ -245,28 +95,11 @@ def delete_dataset(project_id, compute_region, dataset_id):
     )
     delete_dataset_parser.add_argument("dataset_id")
 
-    get_dataset_parser = subparsers.add_parser(
-        "get_dataset", help=get_dataset.__doc__
-    )
-    get_dataset_parser.add_argument("dataset_id")
-
     project_id = os.environ["PROJECT_ID"]
     compute_region = os.environ["REGION_NAME"]
 
     args = parser.parse_args()
 
-    if args.command == "create_dataset":
-        create_dataset(
-            project_id,
-            compute_region,
-            args.dataset_name,
-            args.source,
-            args.target,
-        )
-    if args.command == "list_datasets":
-        list_datasets(project_id, compute_region, args.filter)
-    if args.command == "get_dataset":
-        get_dataset(project_id, compute_region, args.dataset_id)
     if args.command == "import_data":
         import_data(project_id, compute_region, args.dataset_id, args.path)
     if args.command == "delete_dataset":
diff --git a/automl/snippets/automl_translation_model.py b/automl/snippets/automl_translation_model.py
index 4f9d165580fd..1b1e45c91a52 100755
--- a/automl/snippets/automl_translation_model.py
+++ b/automl/snippets/automl_translation_model.py
@@ -25,38 +25,6 @@
 import os
 
 
-def create_model(project_id, compute_region, dataset_id, model_name):
-    """Create a model."""
-    # [START automl_translate_create_model]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # dataset_id = 'DATASET_ID_HERE'
-    # model_name = 'MODEL_NAME_HERE'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # A resource that represents Google Cloud Platform location.
-    project_location = client.location_path(project_id, compute_region)
-
-    # Set model name and dataset.
-    my_model = {
-        "display_name": model_name,
-        "dataset_id": dataset_id,
-        "translation_model_metadata": {"base_model": ""},
-    }
-
-    # Create a model with the model metadata in the region.
-    response = client.create_model(parent=project_location, model=my_model)
-
-    print("Training operation name: {}".format(response.operation.name))
-    print("Training started...")
-
-    # [END automl_translate_create_model]
-
-
 def list_models(project_id, compute_region, filter_):
     """List all models."""
     # [START automl_translate_list_models]
@@ -127,60 +95,6 @@ def get_model(project_id, compute_region, model_id):
     # [END automl_translate_get_model]
 
 
-def list_model_evaluations(project_id, compute_region, model_id, filter_):
-    """List model evaluations."""
-    # [START automl_translate_list_model_evaluations]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # model_id = 'MODEL_ID_HERE'
-    # filter_ = 'filter expression here'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # Get the full path of the model.
-    model_full_id = client.model_path(project_id, compute_region, model_id)
-
-    print("List of model evaluations:")
-    request = automl.ListModelEvaluationsRequest(
-        parent=model_full_id,
-        filter=filter_
-    )
-    for element in client.list_model_evaluations(request=request):
-        print(element)
-
-    # [END automl_translate_list_model_evaluations]
-
-
-def get_model_evaluation(
-    project_id, compute_region, model_id, model_evaluation_id
-):
-    """Get model evaluation."""
-    # [START automl_translate_get_model_evaluation]
-    # TODO(developer): Uncomment and set the following variables
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # model_id = 'MODEL_ID_HERE'
-    # model_evaluation_id = 'MODEL_EVALUATION_ID_HERE'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    client = automl.AutoMlClient()
-
-    # Get the full path of the model evaluation.
-    model_path = client.model_path(project_id, compute_region, model_id)
-    model_evaluation_full_id = f"{model_path}/modelEvaluations/{model_evaluation_id}"
-
-    # Get complete detail of the model evaluation.
-    response = client.get_model_evaluation(name=model_evaluation_full_id)
-
-    print(response)
-
-    # [END automl_translate_get_model_evaluation]
-
-
 def delete_model(project_id, compute_region, model_id):
     """Delete a model."""
     # [START automl_translate_delete_model]
@@ -217,9 +131,7 @@ def get_operation_status(operation_full_id):
     client = automl.AutoMlClient()
 
     # Get the latest state of a long-running operation.
-    response = client._transport.operations_client.get_operation(
-        operation_full_id
-    )
+    response = client._transport.operations_client.get_operation(operation_full_id)
 
     print("Operation status: {}".format(response))
 
@@ -233,27 +145,7 @@ def get_operation_status(operation_full_id):
     )
     subparsers = parser.add_subparsers(dest="command")
 
-    create_model_parser = subparsers.add_parser(
-        "create_model", help=create_model.__doc__
-    )
-    create_model_parser.add_argument("dataset_id")
-    create_model_parser.add_argument("model_name")
-
-    list_model_evaluations_parser = subparsers.add_parser(
-        "list_model_evaluations", help=list_model_evaluations.__doc__
-    )
-    list_model_evaluations_parser.add_argument("model_id")
-    list_model_evaluations_parser.add_argument("filter", nargs="?", default="")
-
-    get_model_evaluation_parser = subparsers.add_parser(
-        "get_model_evaluation", help=get_model_evaluation.__doc__
-    )
-    get_model_evaluation_parser.add_argument("model_id")
-    get_model_evaluation_parser.add_argument("model_evaluation_id")
-
-    get_model_parser = subparsers.add_parser(
-        "get_model", help=get_model.__doc__
-    )
+    get_model_parser = subparsers.add_parser("get_model", help=get_model.__doc__)
     get_model_parser.add_argument("model_id")
 
     get_operation_status_parser = subparsers.add_parser(
@@ -261,9 +153,7 @@ def get_operation_status(operation_full_id):
     )
     get_operation_status_parser.add_argument("operation_full_id")
 
-    list_models_parser = subparsers.add_parser(
-        "list_models", help=list_models.__doc__
-    )
+    list_models_parser = subparsers.add_parser("list_models", help=list_models.__doc__)
     list_models_parser.add_argument("filter", nargs="?", default="")
 
     delete_model_parser = subparsers.add_parser(
@@ -276,22 +166,10 @@ def get_operation_status(operation_full_id):
 
     args = parser.parse_args()
 
-    if args.command == "create_model":
-        create_model(
-            project_id, compute_region, args.dataset_id, args.model_name
-        )
     if args.command == "list_models":
         list_models(project_id, compute_region, args.filter)
     if args.command == "get_model":
         get_model(project_id, compute_region, args.model_id)
-    if args.command == "list_model_evaluations":
-        list_model_evaluations(
-            project_id, compute_region, args.model_id, args.filter
-        )
-    if args.command == "get_model_evaluation":
-        get_model_evaluation(
-            project_id, compute_region, args.model_id, args.model_evaluation_id
-        )
     if args.command == "delete_model":
         delete_model(project_id, compute_region, args.model_id)
     if args.command == "get_operation_status":
diff --git a/automl/snippets/automl_translation_predict.py b/automl/snippets/automl_translation_predict.py
deleted file mode 100644
index 70c14e3634cf..000000000000
--- a/automl/snippets/automl_translation_predict.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This application demonstrates how to perform basic operations on prediction
-with the Google AutoML Translation API.
-
-For more information, see the documentation at
-https://cloud.google.com/translate/automl/docs
-"""
-
-import argparse
-import os
-
-
-def predict(project_id, compute_region, model_id, file_path):
-    """Translate the content."""
-    # [START automl_translate_predict]
-    # project_id = 'PROJECT_ID_HERE'
-    # compute_region = 'COMPUTE_REGION_HERE'
-    # model_id = 'MODEL_ID_HERE'
-    # file_path = '/local/path/to/file'
-
-    from google.cloud import automl_v1beta1 as automl
-
-    automl_client = automl.AutoMlClient()
-
-    # Create client for prediction service.
-    prediction_client = automl.PredictionServiceClient()
-
-    # Get the full path of the model.
-    model_full_id = automl_client.model_path(
-        project_id, compute_region, model_id
-    )
-
-    # Read the file content for translation.
-    with open(file_path, "rb") as content_file:
-        content = content_file.read()
-    content.decode("utf-8")
-
-    # Set the payload by giving the content of the file.
-    payload = {"text_snippet": {"content": content}}
-
-    # params is additional domain-specific parameters.
-    params = {}
-
-    request = automl.PredictRequest(
-        name=model_full_id,
-        payload=payload,
-        params=params
-    )
-
-    response = prediction_client.predict(request=request)
-    translated_content = response.payload[0].translation.translated_content
-
-    print(u"Translated content: {}".format(translated_content.content))
-
-    # [END automl_translate_predict]
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-    )
-    subparsers = parser.add_subparsers(dest="command")
-
-    predict_parser = subparsers.add_parser("predict", help=predict.__doc__)
-    predict_parser.add_argument("model_id")
-    predict_parser.add_argument("file_path")
-
-    project_id = os.environ["PROJECT_ID"]
-    compute_region = os.environ["REGION_NAME"]
-
-    args = parser.parse_args()
-
-    if args.command == "predict":
-        predict(project_id, compute_region, args.model_id, args.file_path)
diff --git a/automl/snippets/batch_predict.py b/automl/snippets/batch_predict.py
index 427fadf22b27..5b5f810b4ed7 100644
--- a/automl/snippets/batch_predict.py
+++ b/automl/snippets/batch_predict.py
@@ -33,14 +33,10 @@ def batch_predict(project_id, model_id, input_uri, output_uri):
     input_config = automl.BatchPredictInputConfig(gcs_source=gcs_source)
 
     gcs_destination = automl.GcsDestination(output_uri_prefix=output_uri)
-    output_config = automl.BatchPredictOutputConfig(
-        gcs_destination=gcs_destination
-    )
+    output_config = automl.BatchPredictOutputConfig(gcs_destination=gcs_destination)
 
     response = prediction_client.batch_predict(
-        name=model_full_id,
-        input_config=input_config,
-        output_config=output_config
+        name=model_full_id, input_config=input_config, output_config=output_config
     )
 
     print("Waiting for operation to complete...")
diff --git a/automl/snippets/batch_predict_test.py b/automl/snippets/batch_predict_test.py
index 2869873a9198..483ea5187ccc 100644
--- a/automl/snippets/batch_predict_test.py
+++ b/automl/snippets/batch_predict_test.py
@@ -20,9 +20,7 @@
 PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 BUCKET_ID = "{}-lcm".format(PROJECT_ID)
 MODEL_ID = "TEN0000000000000000000"
-PREFIX = "TEST_EXPORT_OUTPUT_" + datetime.datetime.now().strftime(
-    "%Y%m%d%H%M%S"
-)
+PREFIX = "TEST_EXPORT_OUTPUT_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
 
 
 def test_batch_predict(capsys):
@@ -32,16 +30,8 @@ def test_batch_predict(capsys):
     try:
         input_uri = "gs://{}/entity-extraction/input.jsonl".format(BUCKET_ID)
         output_uri = "gs://{}/{}/".format(BUCKET_ID, PREFIX)
"gs://{}/{}/".format(BUCKET_ID, PREFIX) - batch_predict.batch_predict( - PROJECT_ID, MODEL_ID, input_uri, output_uri - ) + batch_predict.batch_predict(PROJECT_ID, MODEL_ID, input_uri, output_uri) out, _ = capsys.readouterr() - assert ( - "does not exist" - in out - ) + assert "does not exist" in out except Exception as e: - assert ( - "does not exist" - in e.message - ) + assert "does not exist" in e.message diff --git a/automl/snippets/dataset_test.py b/automl/snippets/dataset_test.py index 6ae5326c026b..a2512d57e40a 100644 --- a/automl/snippets/dataset_test.py +++ b/automl/snippets/dataset_test.py @@ -15,55 +15,31 @@ # limitations under the License. import os -import uuid - -import pytest import automl_translation_dataset -project_id = os.environ["GOOGLE_CLOUD_PROJECT"] -compute_region = "us-central1" -dataset_id = "TRL3876092572857648864" - - -@pytest.mark.slow -def test_dataset_create_import_delete(capsys): - # create dataset - dataset_name = f"test_{uuid.uuid4().hex[:27]}" - automl_translation_dataset.create_dataset( - project_id, compute_region, dataset_name, "en", "ja" - ) - out, _ = capsys.readouterr() - create_dataset_output = out.splitlines() - assert "Dataset id: " in create_dataset_output[1] - - # import data - dataset_id = create_dataset_output[1].split()[2] - data = "gs://{}-vcm/en-ja.csv".format(project_id) - automl_translation_dataset.import_data( - project_id, compute_region, dataset_id, data - ) - out, _ = capsys.readouterr() - assert "Data imported." in out - - # delete dataset - automl_translation_dataset.delete_dataset( - project_id, compute_region, dataset_id - ) - out, _ = capsys.readouterr() - assert "Dataset deleted." in out - - -def test_dataset_list_get(capsys): - # list datasets - automl_translation_dataset.list_datasets(project_id, compute_region, "") - out, _ = capsys.readouterr() - list_dataset_output = out.splitlines() - assert "Dataset id: " in list_dataset_output[2] - - # get dataset - automl_translation_dataset.get_dataset( - project_id, compute_region, dataset_id - ) - out, _ = capsys.readouterr() - assert "Dataset name: " in out +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] +BUCKET_ID = "{}-lcm".format(PROJECT_ID) +COMPUTE_REGION = "us-central1" +DATASET_ID = "TRL00000000000000" + + +def test_import_dataset(capsys): + # As importing a dataset can take a long time and only four operations can + # be run on a dataset at once. Try to import into a nonexistent dataset and + # confirm that the dataset was not found, but other elements of the request + # were valid. + try: + data = "gs://{}/sentiment-analysis/dataset.csv".format(BUCKET_ID) + automl_translation_dataset.import_data( + PROJECT_ID, COMPUTE_REGION, DATASET_ID, data + ) + out, _ = capsys.readouterr() + assert ( + "The Dataset doesn't exist or is inaccessible for use with AutoMl." in out + ) + except Exception as e: + assert ( + "The Dataset doesn't exist or is inaccessible for use with AutoMl." + in e.message + ) diff --git a/automl/snippets/delete_dataset.py b/automl/snippets/delete_dataset.py index 23846b43ce1c..284ca704d9a2 100644 --- a/automl/snippets/delete_dataset.py +++ b/automl/snippets/delete_dataset.py @@ -24,9 +24,7 @@ def delete_dataset(project_id, dataset_id): client = automl.AutoMlClient() # Get the full path of the dataset - dataset_full_id = client.dataset_path( - project_id, "us-central1", dataset_id - ) + dataset_full_id = client.dataset_path(project_id, "us-central1", dataset_id) response = client.delete_dataset(name=dataset_full_id) print("Dataset deleted. 
{}".format(response.result())) diff --git a/automl/snippets/export_dataset.py b/automl/snippets/export_dataset.py index 6be80907985d..bd68ca3116a6 100644 --- a/automl/snippets/export_dataset.py +++ b/automl/snippets/export_dataset.py @@ -26,9 +26,7 @@ def export_dataset(project_id, dataset_id, gcs_uri): client = automl.AutoMlClient() # Get the full path of the dataset - dataset_full_id = client.dataset_path( - project_id, "us-central1", dataset_id - ) + dataset_full_id = client.dataset_path(project_id, "us-central1", dataset_id) gcs_destination = automl.GcsDestination(output_uri_prefix=gcs_uri) output_config = automl.OutputConfig(gcs_destination=gcs_destination) diff --git a/automl/snippets/export_dataset_test.py b/automl/snippets/export_dataset_test.py index de8bfe5aefa0..62b899b06c5c 100644 --- a/automl/snippets/export_dataset_test.py +++ b/automl/snippets/export_dataset_test.py @@ -19,9 +19,7 @@ PROJECT_ID = os.environ["AUTOML_PROJECT_ID"] BUCKET_ID = "{}-lcm".format(PROJECT_ID) -PREFIX = "TEST_EXPORT_OUTPUT_" + datetime.datetime.now().strftime( - "%Y%m%d%H%M%S" -) +PREFIX = "TEST_EXPORT_OUTPUT_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") DATASET_ID = "TEN0000000000000000000" @@ -36,8 +34,7 @@ def test_export_dataset(capsys): ) out, _ = capsys.readouterr() assert ( - "The Dataset doesn't exist or is inaccessible for use with AutoMl." - in out + "The Dataset doesn't exist or is inaccessible for use with AutoMl." in out ) except Exception as e: assert ( diff --git a/automl/snippets/get_dataset.py b/automl/snippets/get_dataset.py index b0ce2c8ae93d..6fa55a2ec965 100644 --- a/automl/snippets/get_dataset.py +++ b/automl/snippets/get_dataset.py @@ -29,9 +29,7 @@ def get_dataset(project_id, dataset_id): client = automl.AutoMlClient() # Get the full path of the dataset - dataset_full_id = client.dataset_path( - project_id, "us-central1", dataset_id - ) + dataset_full_id = client.dataset_path(project_id, "us-central1", dataset_id) dataset = client.get_dataset(name=dataset_full_id) # Display the dataset information diff --git a/automl/snippets/get_model_evaluation.py b/automl/snippets/get_model_evaluation.py index 905410218062..1f526664bf36 100644 --- a/automl/snippets/get_model_evaluation.py +++ b/automl/snippets/get_model_evaluation.py @@ -39,9 +39,7 @@ def get_model_evaluation(project_id, model_id, model_evaluation_id): print("Model evaluation name: {}".format(response.name)) print("Model annotation spec id: {}".format(response.annotation_spec_id)) print("Create Time: {}".format(response.create_time)) - print( - "Evaluation example count: {}".format(response.evaluated_example_count) - ) + print("Evaluation example count: {}".format(response.evaluated_example_count)) # [END automl_language_sentiment_analysis_get_model_evaluation] # [END automl_language_text_classification_get_model_evaluation] # [END automl_translate_get_model_evaluation] diff --git a/automl/snippets/get_model_evaluation_test.py b/automl/snippets/get_model_evaluation_test.py index 8b868cb362cb..7d694e55f728 100644 --- a/automl/snippets/get_model_evaluation_test.py +++ b/automl/snippets/get_model_evaluation_test.py @@ -38,8 +38,6 @@ def model_evaluation_id(): def test_get_model_evaluation(capsys, model_evaluation_id): - get_model_evaluation.get_model_evaluation( - PROJECT_ID, MODEL_ID, model_evaluation_id - ) + get_model_evaluation.get_model_evaluation(PROJECT_ID, MODEL_ID, model_evaluation_id) out, _ = capsys.readouterr() assert "Model evaluation name: " in out diff --git a/automl/snippets/get_operation_status.py 
index d2ac0cc2819d..5d17dd5fc964 100644
--- a/automl/snippets/get_operation_status.py
+++ b/automl/snippets/get_operation_status.py
@@ -24,9 +24,7 @@ def get_operation_status(operation_full_id):
     client = automl.AutoMlClient()
 
     # Get the latest state of a long-running operation.
-    response = client._transport.operations_client.get_operation(
-        operation_full_id
-    )
+    response = client._transport.operations_client.get_operation(operation_full_id)
 
     print("Name: {}".format(response.name))
     print("Operation details:")
diff --git a/automl/snippets/import_dataset.py b/automl/snippets/import_dataset.py
index 3334fb3a4e9a..cdda1431feeb 100644
--- a/automl/snippets/import_dataset.py
+++ b/automl/snippets/import_dataset.py
@@ -25,9 +25,7 @@ def import_dataset(project_id, dataset_id, path):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(project_id, "us-central1", dataset_id)
     # Get the multiple Google Cloud Storage URIs
     input_uris = path.split(",")
     gcs_source = automl.GcsSource(input_uris=input_uris)
diff --git a/automl/snippets/import_dataset_test.py b/automl/snippets/import_dataset_test.py
index 35d23edc7e8f..2b4f578f7d66 100644
--- a/automl/snippets/import_dataset_test.py
+++ b/automl/snippets/import_dataset_test.py
@@ -31,8 +31,7 @@ def test_import_dataset(capsys):
         import_dataset.import_dataset(PROJECT_ID, DATASET_ID, data)
         out, _ = capsys.readouterr()
         assert (
-            "The Dataset doesn't exist or is inaccessible for use with AutoMl."
-            in out
+            "The Dataset doesn't exist or is inaccessible for use with AutoMl." in out
         )
     except Exception as e:
         assert (
diff --git a/automl/snippets/language_entity_extraction_create_dataset_test.py b/automl/snippets/language_entity_extraction_create_dataset_test.py
index 13cd99115676..c4841cdda4fa 100644
--- a/automl/snippets/language_entity_extraction_create_dataset_test.py
+++ b/automl/snippets/language_entity_extraction_create_dataset_test.py
@@ -26,17 +26,13 @@ def test_entity_extraction_create_dataset(capsys):
     # create dataset
     dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    language_entity_extraction_create_dataset.create_dataset(
-        PROJECT_ID, dataset_name
-    )
+    language_entity_extraction_create_dataset.create_dataset(PROJECT_ID, dataset_name)
 
     out, _ = capsys.readouterr()
     assert "Dataset id: " in out
 
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/language_entity_extraction_predict.py b/automl/snippets/language_entity_extraction_predict.py
index 8caea632a716..f6937d2ac331 100644
--- a/automl/snippets/language_entity_extraction_predict.py
+++ b/automl/snippets/language_entity_extraction_predict.py
@@ -26,28 +26,18 @@ def predict(project_id, model_id, content):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Supported mime_types: 'text/plain', 'text/html'
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
-    text_snippet = automl.TextSnippet(
-        content=content, mime_type="text/plain"
-    )
+    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
     payload = automl.ExamplePayload(text_snippet=text_snippet)
 
     response = prediction_client.predict(name=model_full_id, payload=payload)
 
     for annotation_payload in response.payload:
-        print(
-            "Text Extract Entity Types: {}".format(
-                annotation_payload.display_name
-            )
-        )
-        print(
-            "Text Score: {}".format(annotation_payload.text_extraction.score)
-        )
+        print("Text Extract Entity Types: {}".format(annotation_payload.display_name))
+        print("Text Score: {}".format(annotation_payload.text_extraction.score))
         text_segment = annotation_payload.text_extraction.text_segment
         print("Text Extract Entity Content: {}".format(text_segment.content))
         print("Text Start Offset: {}".format(text_segment.start_offset))
diff --git a/automl/snippets/language_sentiment_analysis_create_dataset_test.py b/automl/snippets/language_sentiment_analysis_create_dataset_test.py
index 1ac54461984f..1065c88099ae 100644
--- a/automl/snippets/language_sentiment_analysis_create_dataset_test.py
+++ b/automl/snippets/language_sentiment_analysis_create_dataset_test.py
@@ -25,17 +25,13 @@ def test_sentiment_analysis_create_dataset(capsys):
 
     dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    language_sentiment_analysis_create_dataset.create_dataset(
-        PROJECT_ID, dataset_name
-    )
+    language_sentiment_analysis_create_dataset.create_dataset(PROJECT_ID, dataset_name)
 
     out, _ = capsys.readouterr()
     assert "Dataset id: " in out
 
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/language_sentiment_analysis_predict.py b/automl/snippets/language_sentiment_analysis_predict.py
index e2f5c77799a2..8036d262540d 100644
--- a/automl/snippets/language_sentiment_analysis_predict.py
+++ b/automl/snippets/language_sentiment_analysis_predict.py
@@ -26,23 +26,17 @@ def predict(project_id, model_id, content):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Supported mime_types: 'text/plain', 'text/html'
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
-    text_snippet = automl.TextSnippet(
-        content=content, mime_type="text/plain"
-    )
+    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
     payload = automl.ExamplePayload(text_snippet=text_snippet)
 
     response = prediction_client.predict(name=model_full_id, payload=payload)
 
     for annotation_payload in response.payload:
-        print(
-            "Predicted class name: {}".format(annotation_payload.display_name)
-        )
+        print("Predicted class name: {}".format(annotation_payload.display_name))
         print(
             "Predicted sentiment score: {}".format(
                 annotation_payload.text_sentiment.sentiment
diff --git a/automl/snippets/language_text_classification_create_dataset_test.py b/automl/snippets/language_text_classification_create_dataset_test.py
index a00e6eb167bf..2395e37c7608 100644
--- a/automl/snippets/language_text_classification_create_dataset_test.py
+++ b/automl/snippets/language_text_classification_create_dataset_test.py
@@ -25,17 +25,13 @@ def test_text_classification_create_dataset(capsys):
 
     dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    language_text_classification_create_dataset.create_dataset(
-        PROJECT_ID, dataset_name
-    )
+    language_text_classification_create_dataset.create_dataset(PROJECT_ID, dataset_name)
 
     out, _ = capsys.readouterr()
     assert "Dataset id: " in out
 
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/language_text_classification_create_model.py b/automl/snippets/language_text_classification_create_model.py
index ed996def9d2c..00b5114f0125 100644
--- a/automl/snippets/language_text_classification_create_model.py
+++ b/automl/snippets/language_text_classification_create_model.py
@@ -38,6 +38,6 @@ def create_model(project_id, dataset_id, display_name):
 
     # Create a model with the model metadata in the region.
     response = client.create_model(parent=project_location, model=model)
-    print(u"Training operation name: {}".format(response.operation.name))
+    print("Training operation name: {}".format(response.operation.name))
     print("Training started...")
     # [END automl_language_text_classification_create_model]
diff --git a/automl/snippets/language_text_classification_predict.py b/automl/snippets/language_text_classification_predict.py
index 4baa847850e1..cfc4a6e43780 100644
--- a/automl/snippets/language_text_classification_predict.py
+++ b/automl/snippets/language_text_classification_predict.py
@@ -26,26 +26,18 @@ def predict(project_id, model_id, content):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Supported mime_types: 'text/plain', 'text/html'
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
-    text_snippet = automl.TextSnippet(
-        content=content, mime_type="text/plain"
-    )
+    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
     payload = automl.ExamplePayload(text_snippet=text_snippet)
 
     response = prediction_client.predict(name=model_full_id, payload=payload)
 
     for annotation_payload in response.payload:
+        print(u"Predicted class name: {}".format(annotation_payload.display_name))
         print(
-            u"Predicted class name: {}".format(annotation_payload.display_name)
-        )
-        print(
-            u"Predicted class score: {}".format(
-                annotation_payload.classification.score
-            )
+            u"Predicted class score: {}".format(annotation_payload.classification.score)
         )
     # [END automl_language_text_classification_predict]
diff --git a/automl/snippets/list_model_evaluations.py b/automl/snippets/list_model_evaluations.py
index c5e29d7081dd..8037553c3ccd 100644
--- a/automl/snippets/list_model_evaluations.py
+++ b/automl/snippets/list_model_evaluations.py
@@ -34,17 +34,9 @@ def list_model_evaluations(project_id, model_id):
     print("List of model evaluations:")
     for evaluation in client.list_model_evaluations(parent=model_full_id, filter=""):
         print("Model evaluation name: {}".format(evaluation.name))
-        print(
-            "Model annotation spec id: {}".format(
-                evaluation.annotation_spec_id
-            )
-        )
+        print("Model annotation spec id: {}".format(evaluation.annotation_spec_id))
         print("Create Time: {}".format(evaluation.create_time))
-        print(
-            "Evaluation example count: {}".format(
-                evaluation.evaluated_example_count
-            )
-        )
+        print("Evaluation example count: {}".format(evaluation.evaluated_example_count))
     # [END automl_language_sentiment_analysis_list_model_evaluations]
     # [END automl_language_text_classification_list_model_evaluations]
     # [END automl_translate_list_model_evaluations]
diff --git a/automl/snippets/list_models.py b/automl/snippets/list_models.py
index d46ef1046717..80913105af17 100644
--- a/automl/snippets/list_models.py
+++ b/automl/snippets/list_models.py
@@ -31,10 +31,7 @@ def list_models(project_id):
     print("List of models:")
     for model in response:
         # Display the model information.
-        if (
-            model.deployment_state
-            == automl.Model.DeploymentState.DEPLOYED
-        ):
+        if model.deployment_state == automl.Model.DeploymentState.DEPLOYED:
             deployment_state = "deployed"
         else:
             deployment_state = "undeployed"
diff --git a/automl/snippets/model_test.py b/automl/snippets/model_test.py
index da5f806feb56..c64e45ecd591 100644
--- a/automl/snippets/model_test.py
+++ b/automl/snippets/model_test.py
@@ -14,42 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import datetime
 import os
 
-from google.cloud import automl_v1beta1 as automl
-import pytest
-
 import automl_translation_model
 
 project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
 compute_region = "us-central1"
 
 
-@pytest.mark.skip(reason="creates too many models")
-def test_model_create_status_delete(capsys):
-    # create model
-    client = automl.AutoMlClient()
-    model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    project_location = f"projects/{project_id}/locations/{compute_region}"
-    my_model = {
-        "display_name": model_name,
-        "dataset_id": "3876092572857648864",
-        "translation_model_metadata": {"base_model": ""},
-    }
-    response = client.create_model(parent=project_location, model=my_model)
-    operation_name = response.operation.name
-    assert operation_name
-
-    # get operation status
-    automl_translation_model.get_operation_status(operation_name)
-    out, _ = capsys.readouterr()
-    assert "Operation status: " in out
-
-    # cancel operation
-    response.cancel()
-
-
 def test_model_list_get_evaluate(capsys):
     # list models
     automl_translation_model.list_models(project_id, compute_region, "")
@@ -62,19 +34,3 @@ def test_model_list_get_evaluate(capsys):
     automl_translation_model.get_model(project_id, compute_region, model_id)
     out, _ = capsys.readouterr()
     assert "Model name: " in out
-
-    # list model evaluations
-    automl_translation_model.list_model_evaluations(
-        project_id, compute_region, model_id, ""
-    )
-    out, _ = capsys.readouterr()
-    list_evals_output = out.splitlines()
-    assert "name: " in list_evals_output[1]
-
-    # get model evaluation
-    model_evaluation_id = list_evals_output[1].split("/")[-1][:-1]
-    automl_translation_model.get_model_evaluation(
-        project_id, compute_region, model_id, model_evaluation_id
-    )
-    out, _ = capsys.readouterr()
-    assert model_evaluation_id in out
diff --git a/automl/snippets/noxfile.py b/automl/snippets/noxfile.py
index bca0522ec4d9..bbd25fcdb5e7 100644
--- a/automl/snippets/noxfile.py
+++ b/automl/snippets/noxfile.py
@@ -38,28 +38,25 @@
 
 TEST_CONFIG = {
     # You can opt out from the test for specific Python versions.
-    'ignored_versions': ["2.7"],
-
+    "ignored_versions": ["2.7"],
     # Old samples are opted out of enforcing Python type hints
     # All new samples should feature them
-    'enforce_type_hints': False,
-
+    "enforce_type_hints": False,
     # An envvar key for determining the project id to use. Change it
     # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
    # build specific Cloud project. You can also use your own string
     # to use your own Cloud project.
-    'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+    "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
-    'envs': {},
+    "envs": {},
 }
 
 
 try:
     # Ensure we can import noxfile_config in the project's directory.
-    sys.path.append('.')
+    sys.path.append(".")
     from noxfile_config import TEST_CONFIG_OVERRIDE
 except ImportError as e:
     print("No user noxfile_config found: detail: {}".format(e))
@@ -74,12 +71,12 @@ def get_pytest_env_vars() -> Dict[str, str]:
     ret = {}
 
     # Override the GCLOUD_PROJECT and the alias.
-    env_key = TEST_CONFIG['gcloud_project_env']
+    env_key = TEST_CONFIG["gcloud_project_env"]
 
     # This should error out if not set.
-    ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+    ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
 
     # Apply user supplied envs.
-    ret.update(TEST_CONFIG['envs'])
+    ret.update(TEST_CONFIG["envs"])
 
     return ret
 
@@ -88,7 +85,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
 ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
 
 # Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
 
 TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
 
@@ -137,7 +134,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]:
 
 @nox.session
 def lint(session: nox.sessions.Session) -> None:
-    if not TEST_CONFIG['enforce_type_hints']:
+    if not TEST_CONFIG["enforce_type_hints"]:
         session.install("flake8", "flake8-import-order")
     else:
         session.install("flake8", "flake8-import-order", "flake8-annotations")
@@ -146,9 +143,11 @@ def lint(session: nox.sessions.Session) -> None:
     args = FLAKE8_COMMON_ARGS + [
         "--application-import-names",
         ",".join(local_names),
-        "."
+        ".",
     ]
     session.run("flake8", *args)
+
+
 #
 # Black
 #
@@ -161,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None:
 
     session.run("black", *python_files)
 
+
 #
 # Sample Tests
 #
@@ -169,7 +169,9 @@ def blacken(session: nox.sessions.Session) -> None:
 
 PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
 
-def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None:
+def _session_tests(
+    session: nox.sessions.Session, post_install: Callable = None
+) -> None:
     """Runs py.test for a particular project."""
     if os.path.exists("requirements.txt"):
         session.install("-r", "requirements.txt")
@@ -200,9 +202,9 @@ def py(session: nox.sessions.Session) -> None:
     if session.python in TESTED_VERSIONS:
         _session_tests(session)
     else:
-        session.skip("SKIPPED: {} tests are disabled for this sample.".format(
-            session.python
-        ))
+        session.skip(
+            "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+        )
 
 
 #
diff --git a/automl/snippets/predict_test.py b/automl/snippets/predict_test.py
deleted file mode 100644
index d00a4658d7bb..000000000000
--- a/automl/snippets/predict_test.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import automl_translation_predict
-
-project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
-compute_region = "us-central1"
-
-
-def test_predict(capsys):
-    model_id = "TRL3128559826197068699"
-    automl_translation_predict.predict(
-        project_id, compute_region, model_id, "resources/input.txt"
-    )
-    out, _ = capsys.readouterr()
-    assert "Translated content: " in out
diff --git a/automl/snippets/requirements.txt b/automl/snippets/requirements.txt
index 6ca3167b4e5d..e16fe8f2bbb2 100644
--- a/automl/snippets/requirements.txt
+++ b/automl/snippets/requirements.txt
@@ -1,3 +1,3 @@
 google-cloud-translate==3.0.2
 google-cloud-storage==1.33.0
-google-cloud-automl==2.0.0
+google-cloud-automl==2.1.0
diff --git a/automl/snippets/translate_create_dataset_test.py b/automl/snippets/translate_create_dataset_test.py
index 2f6dd2238f47..d25f0f8e55f6 100644
--- a/automl/snippets/translate_create_dataset_test.py
+++ b/automl/snippets/translate_create_dataset_test.py
@@ -33,8 +33,6 @@ def test_translate_create_dataset(capsys):
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/translate_predict.py b/automl/snippets/translate_predict.py
index 455603198c36..9b93f28485eb 100644
--- a/automl/snippets/translate_predict.py
+++ b/automl/snippets/translate_predict.py
@@ -26,9 +26,7 @@ def predict(project_id, model_id, file_path):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Read the file content for translation.
     with open(file_path, "rb") as content_file:
diff --git a/automl/snippets/vision_classification_create_dataset_test.py b/automl/snippets/vision_classification_create_dataset_test.py
index efd32810ab53..7b86f42e4c15 100644
--- a/automl/snippets/vision_classification_create_dataset_test.py
+++ b/automl/snippets/vision_classification_create_dataset_test.py
@@ -28,17 +28,13 @@ def test_vision_classification_create_dataset(capsys):
     # create dataset
     dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    vision_classification_create_dataset.create_dataset(
-        PROJECT_ID, dataset_name
-    )
+    vision_classification_create_dataset.create_dataset(PROJECT_ID, dataset_name)
 
     out, _ = capsys.readouterr()
     assert "Dataset id: " in out
 
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/vision_classification_deploy_model_node_count.py b/automl/snippets/vision_classification_deploy_model_node_count.py
index 98be955c3392..3884477d6727 100644
--- a/automl/snippets/vision_classification_deploy_model_node_count.py
+++ b/automl/snippets/vision_classification_deploy_model_node_count.py
@@ -28,13 +28,10 @@ def deploy_model(project_id, model_id):
 
     # node count determines the number of nodes to deploy the model on.
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageclassificationmodeldeploymentmetadata
-    metadata = automl.ImageClassificationModelDeploymentMetadata(
-        node_count=2
-    )
+    metadata = automl.ImageClassificationModelDeploymentMetadata(node_count=2)
 
     request = automl.DeployModelRequest(
-        name=model_full_id,
-        image_classification_model_deployment_metadata=metadata
+        name=model_full_id, image_classification_model_deployment_metadata=metadata
     )
     response = client.deploy_model(request=request)
diff --git a/automl/snippets/vision_classification_deploy_model_node_count_test.py b/automl/snippets/vision_classification_deploy_model_node_count_test.py
index 3f6ff430a4d2..c565b58de059 100644
--- a/automl/snippets/vision_classification_deploy_model_node_count_test.py
+++ b/automl/snippets/vision_classification_deploy_model_node_count_test.py
@@ -28,9 +28,7 @@ def test_classification_deploy_model_with_node_count(capsys):
     # nonexistent model and confirm that the model was not found, but other
     # elements of the request were valid.
     try:
-        vision_classification_deploy_model_node_count.deploy_model(
-            PROJECT_ID, MODEL_ID
-        )
+        vision_classification_deploy_model_node_count.deploy_model(PROJECT_ID, MODEL_ID)
         out, _ = capsys.readouterr()
         assert "The model does not exist" in out
     except Exception as e:
diff --git a/automl/snippets/vision_classification_predict.py b/automl/snippets/vision_classification_predict.py
index 4b1a2f7a8394..355de0bc08ad 100644
--- a/automl/snippets/vision_classification_predict.py
+++ b/automl/snippets/vision_classification_predict.py
@@ -26,9 +26,7 @@ def predict(project_id, model_id, file_path):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Read the file.
     with open(file_path, "rb") as content_file:
@@ -42,11 +40,7 @@ def predict(project_id, model_id, file_path):
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
     params = {"score_threshold": "0.8"}
 
-    request = automl.PredictRequest(
-        name=model_full_id,
-        payload=payload,
-        params=params
-    )
+    request = automl.PredictRequest(name=model_full_id, payload=payload, params=params)
     response = prediction_client.predict(request=request)
 
     print("Prediction results:")
diff --git a/automl/snippets/vision_object_detection_create_dataset_test.py b/automl/snippets/vision_object_detection_create_dataset_test.py
index e7a82e3a9e03..97cff120b6d6 100644
--- a/automl/snippets/vision_object_detection_create_dataset_test.py
+++ b/automl/snippets/vision_object_detection_create_dataset_test.py
@@ -28,17 +28,13 @@ def test_vision_object_detection_create_dataset(capsys):
     # create dataset
     dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-    vision_object_detection_create_dataset.create_dataset(
-        PROJECT_ID, dataset_name
-    )
+    vision_object_detection_create_dataset.create_dataset(PROJECT_ID, dataset_name)
 
     out, _ = capsys.readouterr()
     assert "Dataset id: " in out
 
     # Delete the created dataset
     dataset_id = out.splitlines()[1].split()[2]
     client = automl.AutoMlClient()
-    dataset_full_id = client.dataset_path(
-        PROJECT_ID, "us-central1", dataset_id
-    )
+    dataset_full_id = client.dataset_path(PROJECT_ID, "us-central1", dataset_id)
     response = client.delete_dataset(name=dataset_full_id)
     response.result()
diff --git a/automl/snippets/vision_object_detection_deploy_model_node_count.py b/automl/snippets/vision_object_detection_deploy_model_node_count.py
index 9a15d2287b97..cc65941b1027 100644
--- a/automl/snippets/vision_object_detection_deploy_model_node_count.py
+++ b/automl/snippets/vision_object_detection_deploy_model_node_count.py
@@ -28,9 +28,7 @@ def deploy_model(project_id, model_id):
 
     # node count determines the number of nodes to deploy the model on.
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodeldeploymentmetadata
-    metadata = automl.ImageObjectDetectionModelDeploymentMetadata(
-        node_count=2
-    )
+    metadata = automl.ImageObjectDetectionModelDeploymentMetadata(node_count=2)
 
     request = automl.DeployModelRequest(
         name=model_full_id,
diff --git a/automl/snippets/vision_object_detection_predict.py b/automl/snippets/vision_object_detection_predict.py
index 2a059d40396a..51ed0ac029fa 100644
--- a/automl/snippets/vision_object_detection_predict.py
+++ b/automl/snippets/vision_object_detection_predict.py
@@ -26,9 +26,7 @@ def predict(project_id, model_id, file_path):
     prediction_client = automl.PredictionServiceClient()
 
     # Get the full path of the model.
-    model_full_id = automl.AutoMlClient.model_path(
-        project_id, "us-central1", model_id
-    )
+    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)
 
     # Read the file.
     with open(file_path, "rb") as content_file:
@@ -42,21 +40,13 @@ def predict(project_id, model_id, file_path):
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
     params = {"score_threshold": "0.8"}
 
-    request = automl.PredictRequest(
-        name=model_full_id,
-        payload=payload,
-        params=params
-    )
+    request = automl.PredictRequest(name=model_full_id, payload=payload, params=params)
     response = prediction_client.predict(request=request)
 
     print("Prediction results:")
     for result in response.payload:
         print("Predicted class name: {}".format(result.display_name))
-        print(
-            "Predicted class score: {}".format(
-                result.image_object_detection.score
-            )
-        )
+        print("Predicted class score: {}".format(result.image_object_detection.score))
         bounding_box = result.image_object_detection.bounding_box
         print("Normalized Vertices:")
         for vertex in bounding_box.normalized_vertices:
diff --git a/automl/tables/requirements.txt b/automl/tables/requirements.txt
index d2157cd180ba..94f5d6c85200 100644
--- a/automl/tables/requirements.txt
+++ b/automl/tables/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.0.0
+google-cloud-automl==2.1.0