updates for sdk/cli inference examples (#2525)
* updates for sdk/cli inference examples

* deploying from azureml-staging erroring out
rjaincc authored Aug 4, 2023
1 parent 8fd0b93 commit 99330a4
Showing 12 changed files with 56 additions and 84 deletions.
@@ -5,14 +5,13 @@ set -x
# the sample scoring file available in the same folder as the above notebook

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="microsoft-beit-base-patch16-224-pt22k-ft22k"

model_label="latest"

deployment_compute="cpu-cluster"
@@ -3,15 +3,14 @@ set -x
# the sample scoring file available in the same folder as the above notebook

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="microsoft-beit-base-patch16-224-pt22k-ft22k"
# using the latest version of the model - not working yet
model_version=2
model_label="latest"

version=$(date +%s)
endpoint_name="image-classification-$version"
@@ -42,12 +41,15 @@ workspace_info="--resource-group $resource_group_name --workspace-name $workspac

# 2. Check if the model exists in the registry
# need to confirm model show command works for registries outside the tenant (aka system registry)
if ! az ml model show --name $model_name --version $model_version --registry-name $registry_name
if ! az ml model show --name $model_name --label $model_label --registry-name $registry_name
then
echo "Model $model_name:$model_version does not exist in registry $registry_name"
echo "Model $model_name:$model_label does not exist in registry $registry_name"
exit 1
fi

# get the latest model version
model_version=$(az ml model show --name $model_name --label $model_label --registry-name $registry_name --query version --output tsv)

# 3. Deploy the model to an endpoint
# create online endpoint
az ml online-endpoint create --name $endpoint_name $workspace_info || {
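For reference, the SDK notebooks in this commit resolve the newest model version in Python instead of pinning one, just as the CLI hunk above does with `--label`. A minimal sketch of that pattern, assuming the `azureml-preview` registry and the BEiT model named in the script above (placeholder IDs, not a copy of any changed cell):

```python
# Illustrative sketch: resolve the latest version of a registry model with the SDK,
# mirroring the CLI change above that swaps a pinned model_version for a label lookup.
from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential

registry_ml_client = MLClient(
    credential=DefaultAzureCredential(),
    subscription_id="<SUBSCRIPTION_ID>",
    resource_group_name="<RESOURCE_GROUP>",
    registry_name="azureml-preview",
)

model_name = "microsoft-beit-base-patch16-224-pt22k-ft22k"

# List every version of the model and keep the highest. Like the notebook cells
# in this diff, this compares version strings, so it assumes versions sort as text.
foundation_models = list(registry_ml_client.models.list(name=model_name))
foundation_model = max(foundation_models, key=lambda m: m.version)
print(f"Using {foundation_model.name} version {foundation_model.version}")
```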
@@ -2,14 +2,13 @@ set -x
# the commands in this file map to steps in this notebook: https://aka.ms/azureml-infer-batch-sdk-image-instance-segmentation

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="mask_rcnn_swin-t-p4-w7_fpn_1x_coco"

model_label="latest"

deployment_compute="cpu-cluster"
@@ -3,14 +3,13 @@ set -x
# the sample scoring file available in the same folder as the above notebook

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="mask_rcnn_swin-t-p4-w7_fpn_1x_coco"
# using the latest version of the model - not working yet
model_label="latest"

version=$(date +%s)
@@ -2,14 +2,13 @@ set -x
# the commands in this file map to steps in this notebook: https://aka.ms/azureml-infer-batch-sdk-image-object-detection

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="yolof_r50_c5_8x8_1x_coco"

model_label="latest"

deployment_compute="cpu-cluster"
@@ -3,14 +3,13 @@ set -x
# the sample scoring file available in the same folder as the above notebook

# script inputs
registry_name="azureml-staging"
registry_name="azureml-preview"
subscription_id="<SUBSCRIPTION_ID>"
resource_group_name="<RESOURCE_GROUP>"
workspace_name="<WORKSPACE_NAME>"

# This is the model from system registry that needs to be deployed
model_name="yolof_r50_c5_8x8_1x_coco"
# using the latest version of the model - not working yet
model_label="latest"

version=$(date +%s)
@@ -16,7 +16,7 @@
"* MultiLabel: An image can be categorised into more than one class.\n",
" \n",
"### Model\n",
"Models that can perform the `image-classification` task are tagged with `image-classification`. We will use the `microsoft-beit-base-patch16-224-pt22k-ft22k` model in this notebook. If you opened this notebook from a specific model card, remember to replace the specific model name. If you don't find a model that suits your scenario or domain, you can discover and [import models from HuggingFace hub](../../import/import-model-from-huggingface.ipynb) and then use them for inference. \n",
"Models that can perform the `image-classification` task are tagged with `image-classification`. We will use the `microsoft-beit-base-patch16-224-pt22k-ft22k` model in this notebook. If you opened this notebook from a specific model card, remember to replace the specific model name. If you don't find a model that suits your scenario or domain, you can discover and [import models from HuggingFace hub](../../import/import_model_into_registry.ipynb) and then use them for inference. \n",
"\n",
"### Inference data\n",
"We will use the [fridgeObjects](https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip) dataset.\n",
@@ -76,12 +76,12 @@
"\n",
"workspace_ml_client = MLClient(credential, subscription_id, resource_group, workspace_name)\n",
"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-staging\"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-preview\"\n",
"registry_ml_client = MLClient(\n",
" credential,\n",
" subscription_id,\n",
" resource_group,\n",
" registry_name=\"azureml-staging\",\n",
" registry_name=\"azureml-preview\",\n",
")\n",
"# generating a unique timestamp that can be used for names and versions that need to be unique\n",
"timestamp = str(int(time.time()))"
@@ -92,7 +92,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Create a compute cluster.\n",
"#### Create a compute cluster\n",
"Use the model card from the AzureML system registry to check the minimum required inferencing SKU, referenced as size below. If you already have a sufficient compute cluster, you can simply define the name in compute_name in the following code block."
]
},
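The cell that actually creates the cluster is collapsed out of this hunk; below is a rough sketch of what such a step usually looks like, assuming the `workspace_ml_client` from the setup cell above (cluster name and SKU are placeholders, not values from this commit):

```python
# Hedged sketch: create the inference compute cluster if it does not already exist.
from azure.ai.ml.entities import AmlCompute

compute_name = "cpu-cluster"  # assumed; matches deployment_compute in the CLI scripts
try:
    compute = workspace_ml_client.compute.get(compute_name)
    print(f"Reusing existing compute cluster: {compute_name}")
except Exception:
    compute = AmlCompute(
        name=compute_name,
        size="Standard_DS3_v2",  # check the model card for the minimum required SKU
        min_instances=0,
        max_instances=2,
    )
    workspace_ml_client.compute.begin_create_or_update(compute).result()
```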
@@ -130,7 +130,7 @@
"source": [
"### 2. Pick a model to deploy\n",
"\n",
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-classification` task. In this example, we use the `microsoft-beit-base-patch16-224-pt22k-ft22k ` model. If you have opened this notebook for a different model, replace the model name and version accordingly. "
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-classification` task. In this example, we use the `microsoft-beit-base-patch16-224-pt22k-ft22k ` model. If you have opened this notebook for a different model, replace the model name accordingly. "
]
},
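The model-selection cell for this batch notebook is collapsed in the hunk above. Elsewhere in this commit the same step resolves the model by its `latest` label; a minimal sketch, assuming the `registry_ml_client` created in the setup cell:

```python
# Hedged sketch: fetch the newest registered version of the model by label.
model_name = "microsoft-beit-base-patch16-224-pt22k-ft22k"
foundation_model = registry_ml_client.models.get(model_name, label="latest")
print(
    f"Using model name: {foundation_model.name}, "
    f"version: {foundation_model.version}, id: {foundation_model.id} for inferencing"
)
```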
{
@@ -239,7 +239,7 @@
"\n",
"We can provide input images to batch inference either in a folder containing images or in a csv file containing \"image\" named column having images in base 64 format.\n",
"\n",
"Note: If job failed with error Assertion Error (The actual length exceeded max length 100 MB) then please try with less number of input images or use ImageFolder Input mode."
"Note: If job failed with error Assertion Error (`The actual length exceeded max length 100 MB`) then please try with less number of input images or use ImageFolder Input mode."
]
},
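As a concrete illustration of that csv option — a single column named "image" holding base64-encoded files — here is a small sketch; the folder path and file pattern are assumptions:

```python
# Illustrative sketch: build a csv batch-inference input with one base64 "image" column.
import base64
import glob
import os

import pandas as pd

image_dir = "fridgeObjects"  # hypothetical folder of downloaded sample images
encoded = []
for path in glob.glob(os.path.join(image_dir, "**", "*.jpg"), recursive=True):
    with open(path, "rb") as f:
        encoded.append(base64.b64encode(f.read()).decode("utf-8"))

# Keep the file well under the ~100 MB limit mentioned in the note above.
pd.DataFrame({"image": encoded[:10]}).to_csv("image_batch_input.csv", index=False)
```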
{
@@ -276,12 +276,6 @@
"Image(filename=sample_image)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": []
},
{
"attachments": {},
"cell_type": "markdown",
@@ -16,7 +16,7 @@
"* MultiLabel: An image can be categorised into more than one class.\n",
" \n",
"### Model\n",
"Models that can perform the `image-classification` task are tagged with `image-classification`. We will use the `microsoft-beit-base-patch16-224-pt22k-ft22k` model in this notebook. If you opened this notebook from a specific model card, remember to replace the specific model name. If you don't find a model that suits your scenario or domain, you can discover and [import models from HuggingFace hub](../../import/import-model-from-huggingface.ipynb) and then use them for inference. \n",
"Models that can perform the `image-classification` task are tagged with `image-classification`. We will use the `microsoft-beit-base-patch16-224-pt22k-ft22k` model in this notebook. If you opened this notebook from a specific model card, remember to replace the specific model name. If you don't find a model that suits your scenario or domain, you can discover and [import models from HuggingFace hub](../../import/import_model_into_registry.ipynb) and then use them for inference.\n",
"\n",
"### Inference data\n",
"We will use the [fridgeObjects](https://cvbp-secondary.z19.web.core.windows.net/datasets/image_classification/fridgeObjects.zip) dataset.\n",
@@ -39,7 +39,7 @@
"### 1. Setup pre-requisites\n",
"* Install dependencies\n",
"* Connect to AzureML Workspace. Learn more at [set up SDK authentication](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-setup-authentication?tabs=sdk). Replace `<WORKSPACE_NAME>`, `<RESOURCE_GROUP>` and `<SUBSCRIPTION_ID>` below.\n",
"* Connect to `azureml-staging` system registry"
"* Connect to `azureml-preview` system registry"
]
},
{
@@ -72,13 +72,12 @@
" workspace_name = \"<AML_WORKSPACE_NAME>\"\n",
"workspace_ml_client = MLClient(credential, subscription_id, resource_group, workspace_name)\n",
"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-staging\"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-preview\"\n",
"registry_ml_client = MLClient(\n",
" credential,\n",
" subscription_id,\n",
" resource_group,\n",
" # workspace_name\n",
" registry_name=\"azureml-staging\",\n",
" registry_name=\"azureml-preview\",\n",
")\n",
"# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
"timestamp = str(int(time.time()))"
@@ -91,7 +90,7 @@
"source": [
"### 2. Pick a model to deploy\n",
"\n",
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-classification` task. In this example, we use the `microsoft-beit-base-patch16-224-pt22k-ft22k ` model. If you have opened this notebook for a different model, replace the model name and version accordingly. "
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-classification` task. In this example, we use the `microsoft-beit-base-patch16-224-pt22k-ft22k ` model. If you have opened this notebook for a different model, replace the model name accordingly. "
]
},
{
Expand All @@ -101,9 +100,8 @@
"outputs": [],
"source": [
"model_name = \"microsoft-beit-base-patch16-224-pt22k-ft22k\"\n",
"model_version = \"1\"\n",
"foundation_model = registry_ml_client.models.get(model_name, model_version)\n",
"\n",
"foundation_models = registry_ml_client.models.list(name=model_name)\n",
"foundation_model = max(foundation_models, key=lambda x: x.version)\n",
"print(\n",
" f\"\\n\\nUsing model name: {foundation_model.name}, version: {foundation_model.version}, id: {foundation_model.id} for inferencing\"\n",
")"
@@ -226,7 +224,7 @@
" endpoint_name=online_endpoint_name,\n",
" model=foundation_model.id,\n",
" # use GPU instance type like Standard_NC6s_v3 for faster explanations\n",
" instance_type=\"Standard_DS3_V2\", # \"Standard_DS3_V2\",\n",
" instance_type=\"Standard_DS3_V2\",\n",
" instance_count=1,\n",
" request_settings=OnlineRequestSettings(\n",
" max_concurrent_requests_per_instance=1, request_timeout_ms=5000, max_queue_wait_ms=500 # 90000,\n",
@@ -258,7 +256,7 @@
"source": [
"### 5. Test the endpoint with sample data\n",
"\n",
"We will fetch some sample data from the test dataset and submit to online endpoint for inference. We will then show the display the scored labels alongside the ground truth labels"
"We will fetch some sample data from the test dataset and submit to online endpoint for inference. We will then show the scored labels alongside the ground truth labels."
]
},
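A minimal sketch of that test step, assuming the `workspace_ml_client`, endpoint name, and deployment created earlier in the notebook; the payload shape below is an assumption based on the request fragments elsewhere in this diff, not the model's documented contract:

```python
# Hedged sketch: score one base64-encoded image against the online endpoint.
import base64
import json

sample_image = "fridgeObjects/milk_bottle/99.jpg"  # hypothetical test image path

with open(sample_image, "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

request_json = {"input_data": {"columns": ["image"], "data": [image_b64]}}
with open("sample_request.json", "w") as f:
    json.dump(request_json, f)

response = workspace_ml_client.online_endpoints.invoke(
    endpoint_name=online_endpoint_name,  # created in the earlier deployment step
    deployment_name="demo",              # assumed deployment name
    request_file="sample_request.json",
)
print(response)
```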
{
@@ -73,12 +73,12 @@
"\n",
"workspace_ml_client = MLClient(credential, subscription_id, resource_group, workspace_name)\n",
"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-staging\"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-preview\"\n",
"registry_ml_client = MLClient(\n",
" credential,\n",
" subscription_id,\n",
" resource_group,\n",
" registry_name=\"azureml-staging\",\n",
" registry_name=\"azureml-preview\",\n",
")\n",
"# generating a unique timestamp that can be used for names and versions that need to be unique\n",
"timestamp = str(int(time.time()))"
@@ -127,7 +127,7 @@
"source": [
"### 2. Pick a model to deploy\n",
"\n",
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-instance-segmentation` task. In this example, we use the `mask_rcnn_swin-t-p4-w7_fpn_1x_coco` model. If you have opened this notebook for a different model, replace the model name and version accordingly. This is a pre-trained model and may not give correct prediction for your dataset. You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Please refer to the [notebook.](../../finetune/image-instance-segmentation/mmdetection-fridgeobjects-instance-segmentation.ipynb)"
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-instance-segmentation` task. In this example, we use the `mask_rcnn_swin-t-p4-w7_fpn_1x_coco` model. If you have opened this notebook for a different model, replace the model name accordingly. This is a pre-trained model and may not give correct prediction for your dataset. We strongly recommend to TRAIN this model on a down-stream task to be able to use it for predictions and inference. Please refer to the [notebook.](../../finetune/image-instance-segmentation/mmdetection-fridgeobjects-instance-segmentation.ipynb)"
]
},
{
Expand All @@ -137,9 +137,8 @@
"outputs": [],
"source": [
"model_name = \"mask_rcnn_swin-t-p4-w7_fpn_1x_coco\"\n",
"\n",
"foundation_model = registry_ml_client.models.get(model_name, label=\"latest\")\n",
"\n",
"foundation_models = registry_ml_client.models.list(name=model_name)\n",
"foundation_model = max(foundation_models, key=lambda x: x.version)\n",
"print(\n",
" f\"\\n\\nUsing model name: {foundation_model.name}, version: {foundation_model.version}, id: {foundation_model.id} for inferencing\"\n",
")"
@@ -481,11 +480,6 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "temp",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
}
@@ -38,7 +38,7 @@
"### 1. Setup pre-requisites\n",
"* Install dependencies\n",
"* Connect to AzureML Workspace. Learn more at [set up SDK authentication](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-setup-authentication?tabs=sdk). Replace `<WORKSPACE_NAME>`, `<RESOURCE_GROUP>` and `<SUBSCRIPTION_ID>` below.\n",
"* Connect to `azureml` system registry"
"* Connect to `azureml-preview` system registry"
]
},
{
Expand Down Expand Up @@ -71,13 +71,12 @@
" workspace_name = \"<AML_WORKSPACE_NAME>\"\n",
"workspace_ml_client = MLClient(credential, subscription_id, resource_group, workspace_name)\n",
"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-staging\"\n",
"# the models, fine tuning pipelines and environments are available in the AzureML system registry, \"azureml-preview\"\n",
"registry_ml_client = MLClient(\n",
" credential,\n",
" subscription_id,\n",
" resource_group,\n",
" # workspace_name\n",
" registry_name=\"azureml-staging\",\n",
" registry_name=\"azureml-preview\",\n",
")\n",
"# genrating a unique timestamp that can be used for names and versions that need to be unique\n",
"timestamp = str(int(time.time()))"
@@ -90,7 +89,7 @@
"source": [
"### 2. Pick a model to deploy\n",
"\n",
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-instance-segmentation` task. In this example, we use the `mask_rcnn_swin-t-p4-w7_fpn_1x_coco` model. If you have opened this notebook for a different model, replace the model name and version accordingly. This is a pre-trained model and may not give correct prediction for your dataset. You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Please refer to the [notebook.](../../finetune/image-instance-segmentation/mmdetection-fridgeobjects-instance-segmentation.ipynb)"
"Browse models in the Model Catalog in the AzureML Studio, filtering by the `image-instance-segmentation` task. In this example, we use the `mask_rcnn_swin-t-p4-w7_fpn_1x_coco` model. If you have opened this notebook for a different model, replace the model name accordingly. This is a pre-trained model and may not give correct prediction for your dataset. We strongly recommend to TRAIN this model on a down-stream task to be able to use it for predictions and inference. Please refer to the [notebook](../../finetune/image-instance-segmentation/mmdetection-fridgeobjects-instance-segmentation.ipynb)."
]
},
{
Expand All @@ -100,9 +99,8 @@
"outputs": [],
"source": [
"model_name = \"mask_rcnn_swin-t-p4-w7_fpn_1x_coco\"\n",
"# foundation_model = registry_ml_client.models.get(model_name, label=\"latest\")\n",
"foundation_model = workspace_ml_client.models.get(model_name, label=\"latest\")\n",
"\n",
"foundation_models = registry_ml_client.models.list(name=model_name)\n",
"foundation_model = max(foundation_models, key=lambda x: x.version)\n",
"print(\n",
" f\"\\n\\nUsing model name: {foundation_model.name}, version: {foundation_model.version}, id: {foundation_model.id} for inferencing\"\n",
")"
@@ -221,7 +219,9 @@
" instance_type=\"Standard_DS3_v2\",\n",
" instance_count=1,\n",
" request_settings=OnlineRequestSettings(\n",
" max_concurrent_requests_per_instance=1, request_timeout_ms=5000, max_queue_wait_ms=500 # 90000,\n",
" max_concurrent_requests_per_instance=1,\n",
" request_timeout_ms=5000, # 90000,\n",
" max_queue_wait_ms=500\n",
" ),\n",
" liveness_probe=ProbeSettings(\n",
" failure_threshold=30,\n",
@@ -286,13 +286,10 @@
"\n",
"sample_image = os.path.join(dataset_dir, \"images\", \"99.jpg\")\n",
"\n",
"\n",
"def read_image(image_path):\n",
" with open(image_path, \"rb\") as f:\n",
" return f.read()\n",
"\n",
"\n",
"# {\"inputs\":{\"image\":[\"\"]}}\n",
"request_json = {\n",
" \"input_data\": \n",
" {\n",
@@ -350,11 +347,6 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "temp",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
}